glance-12.0.0/0000775000567000056710000000000012701407204014161 5ustar jenkinsjenkins00000000000000glance-12.0.0/etc/0000775000567000056710000000000012701407204014734 5ustar jenkinsjenkins00000000000000glance-12.0.0/etc/property-protections-roles.conf.sample0000664000567000056710000000211512701407047024444 0ustar jenkinsjenkins00000000000000# property-protections-roles.conf.sample # # This file is an example config file for when # property_protection_rule_format=roles is enabled. # # Specify regular expression for which properties will be protected in [] # For each section, specify CRUD permissions. # The property rules will be applied in the order specified. Once # a match is found the remaining property rules will not be applied. # # WARNING: # * If the reg ex specified below does not compile, then # glance-api service will not start. (Guide for reg ex python compiler used: # http://docs.python.org/2/library/re.html#regular-expression-syntax) # * If an operation(create, read, update, delete) is not specified or misspelt # then the glance-api service will not start. # So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! # # NOTE: Multiple roles can be specified for a given operation. These roles must # be comma separated. [^x_.*] create = admin,member,_member_ read = admin,member,_member_ update = admin,member,_member_ delete = admin,member,_member_ [.*] create = admin read = admin update = admin delete = admin glance-12.0.0/etc/metadefs/0000775000567000056710000000000012701407204016524 5ustar jenkinsjenkins00000000000000glance-12.0.0/etc/metadefs/README0000664000567000056710000000041612701407047017412 0ustar jenkinsjenkins00000000000000This directory contains predefined namespaces for Glance Metadata Definitions Catalog. Files from this directory can be loaded into the database using db_load_metadefs command for glance-manage. Similarly you can unload the definitions using db_unload_metadefs command. 
glance-12.0.0/etc/metadefs/compute-trust.json0000664000567000056710000000226012701407047022257 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::Trust", "display_name": "Trusted Compute Pools (Intel® TXT)", "description": "Trusted compute pools with Intel® Trusted Execution Technology (Intel® TXT) support IT compliance by protecting virtualized data centers - private, public, and hybrid clouds against attacks toward hypervisor and BIOS, firmware, and other pre-launch software components. The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "trust:trusted_host": { "title": "Intel® TXT attestation", "description": "Select to ensure that node has been attested by Intel® Trusted Execution Technology (Intel® TXT). The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", "type": "string", "enum": [ "trusted", "untrusted", "unknown" ] } } }glance-12.0.0/etc/metadefs/compute-cpu-pinning.json0000664000567000056710000000214412701407047023326 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::CPUPinning", "display_name": "CPU Pinning", "description": "This provides the preferred CPU Pinning policy when pinning vCPU of the guest to pCPU of the host. Default 'shared' option doesn't change current default guest CPU placement policy. 'Dedicated' will make guest CPU(s) to be strictly pinned to set of host CPU(s). 
See also https://wiki.openstack.org/wiki/VirtDriverGuestCPUMemoryPlacement", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "cpu_policy": { "title": "CPU Pinning policy", "description": "Type of CPU Pinning policy.", "type": "string", "enum": [ "shared", "dedicated" ] } } } glance-12.0.0/etc/metadefs/software-databases.json0000664000567000056710000004445312701407047023215 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Software::DBMS", "display_name": "Database Software", "description": "A database is an organized collection of data. The data is typically organized to model aspects of reality in a way that supports processes requiring information. Database management systems are computer software applications that interact with the user, other applications, and the database itself to capture and analyze data. (http://en.wikipedia.org/wiki/Database)", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" }, { "name": "OS::Trove::Instance" } ], "objects": [ { "name": "MySQL", "description": "MySQL is an object-relational database management system (ORDBMS). The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, now owned by Oracle Corporation. MySQL is a popular choice of database for use in web applications, and is a central component of the widely used LAMP open source web application software stack (and other 'AMP' stacks). 
(http://en.wikipedia.org/wiki/MySQL)", "properties": { "sw_database_mysql_version": { "title": "Version", "description": "The specific version of MySQL.", "type": "string" }, "sw_database_mysql_listen_port": { "title": "Listen Port", "description": "The configured TCP/IP port which MySQL listens for incoming connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 3606 }, "sw_database_mysql_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "root" } } }, { "name": "PostgreSQL", "description": "PostgreSQL, often simply 'Postgres', is an object-relational database management system (ORDBMS) with an emphasis on extensibility and standards-compliance. PostgreSQL is cross-platform and runs on many operating systems. (http://en.wikipedia.org/wiki/PostgreSQL)", "properties": { "sw_database_postgresql_version": { "title": "Version", "description": "The specific version of PostgreSQL.", "type": "string" }, "sw_database_postgresql_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which PostgreSQL is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5432 }, "sw_database_postgresql_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "postgres" } } }, { "name": "SQL Server", "description": "Microsoft SQL Server is a relational database management system developed by Microsoft. There are at least a dozen different editions of Microsoft SQL Server aimed at different audiences and for workloads ranging from small single-machine applications to large Internet-facing applications with many concurrent users. Its primary query languages are T-SQL and ANSI SQL. 
(http://en.wikipedia.org/wiki/Microsoft_SQL_Server)", "properties": { "sw_database_sqlserver_version": { "title": "Version", "description": "The specific version of Microsoft SQL Server.", "type": "string" }, "sw_database_sqlserver_edition": { "title": "Edition", "description": "SQL Server is available in multiple editions, with different feature sets and targeting different users.", "type": "string", "default": "Express", "enum": [ "Datacenter", "Enterprise", "Standard", "Web", "Business Intelligence", "Workgroup", "Express", "Compact (SQL CE)", "Developer", "Embedded (SSEE)", "Express", "Fast Track", "LocalDB", "Parallel Data Warehouse (PDW)", "Business Intelligence", "Datawarehouse Appliance Edition" ] }, "sw_database_sqlserver_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which SQL Server is to listen for connections from client applications. The default SQL Server port is 1433, and client ports are assigned a random value between 1024 and 5000.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 1433 }, "sw_database_postsqlserver_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "sa" } } }, { "name": "Oracle", "description": "Oracle Database (commonly referred to as Oracle RDBMS or simply as Oracle) is an object-relational database management system produced and marketed by Oracle Corporation. 
(http://en.wikipedia.org/wiki/Oracle_Database)", "properties": { "sw_database_oracle_version": { "title": "Version", "description": "The specific version of Oracle.", "type": "string" }, "sw_database_oracle_edition": { "title": "Edition", "description": "Over and above the different versions of the Oracle database management software developed over time, Oracle Corporation subdivides its product into varying editions.", "type": "string", "default": "Express", "enum": [ "Enterprise", "Standard", "Standard Edition One", "Express (XE)", "Workgroup", "Lite" ] }, "sw_database_oracle_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Oracle is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 1521 } } }, { "name": "DB2", "description": "IBM DB2 is a family of database server products developed by IBM. These products all support the relational model, but in recent years some products have been extended to support object-relational features and non-relational structures, in particular XML. (http://en.wikipedia.org/wiki/IBM_DB2)", "properties": { "sw_database_db2_version": { "title": "Version", "description": "The specific version of DB2.", "type": "string" }, "sw_database_db2_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which DB2 is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5432 }, "sw_database_db2_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "MongoDB", "description": "MongoDB is a cross-platform document-oriented database. 
Classified as a NoSQL database, MongoDB uses JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster. Released under a combination of the GNU Affero General Public License and the Apache License, MongoDB is free and open-source software. (http://en.wikipedia.org/wiki/MongoDB)", "properties": { "sw_database_mongodb_version": { "title": "Version", "description": "The specific version of MongoDB.", "type": "string" }, "sw_database_mongodb_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which MongoDB is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 27017 }, "sw_database_mongodb_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "Couchbase Server", "description": "Couchbase Server, originally known as Membase, is an open source, distributed (shared-nothing architecture) NoSQL document-oriented database that is optimized for interactive applications. These applications must serve many concurrent users by creating, storing, retrieving, aggregating, manipulating and presenting data. In support of these kinds of application needs, Couchbase is designed to provide easy-to-scale key-value or document access with low latency and high sustained throughput. 
(http://en.wikipedia.org/wiki/Couchbase_Server)", "properties": { "sw_database_couchbaseserver_version": { "title": "Version", "description": "The specific version of Couchbase Server.", "type": "string" }, "sw_database_couchbaseserver_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Couchbase is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 11211 }, "sw_database_couchbaseserver_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "admin" } } }, { "name": "Redis", "description": "Redis is a data structure server (NoSQL). It is open-source, networked, in-memory, and stores keys with optional durability. The development of Redis has been sponsored by Pivotal Software since May 2013; before that, it was sponsored by VMware. The name Redis means REmote DIctionary Server. (http://en.wikipedia.org/wiki/Redis)", "properties": { "sw_database_redis_version": { "title": "Version", "description": "The specific version of Redis.", "type": "string" }, "sw_database_redis_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Redis is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 6379 }, "sw_database_redis_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "admin" } } }, { "name": "CouchDB", "description": "Apache CouchDB, commonly referred to as CouchDB, is an open source NoSQL database. It is a NoSQL database that uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. One of its distinguishing features is multi-master replication. 
CouchDB was first released in 2005 and later became an Apache project in 2008. (http://en.wikipedia.org/wiki/CouchDB)", "properties": { "sw_database_couchdb_version": { "title": "Version", "description": "The specific version of CouchDB.", "type": "string" }, "sw_database_couchdb_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which CouchDB is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5984 }, "sw_database_couchdb_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string" } } }, { "name": "Apache Cassandra", "description": "Apache Cassandra is an open source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. (http://en.wikipedia.org/wiki/Apache_Cassandra)", "properties": { "sw_database_cassandra_version": { "title": "Version", "description": "The specific version of Apache Cassandra.", "type": "string" }, "sw_database_cassandra_listen_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Cassandra is to listen for connections from client applications.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 9160 }, "sw_database_cassandra_admin": { "title": "Admin User", "description": "The primary user with privileges to perform administrative operations.", "type": "string", "default": "cassandra" } } }, { "name": "HBase", "description": "HBase is an open source, non-relational (NoSQL), distributed database modeled after Google's BigTable and written in Java. 
It is developed as part of Apache Software Foundation's Apache Hadoop project and runs on top of HDFS (Hadoop Distributed Filesystem), providing BigTable-like capabilities for Hadoop. (http://en.wikipedia.org/wiki/Apache_HBase)", "properties": { "sw_database_hbase_version": { "title": "Version", "description": "The specific version of HBase.", "type": "string" } } }, { "name": "Hazlecast", "description": "In computing, Hazelcast is an in-memory open source software data grid based on Java. By having multiple nodes form a cluster, data is evenly distributed among the nodes. This allows for horizontal scaling both in terms of available storage space and processing power. Backups are also distributed in a similar fashion to other nodes, based on configuration, thereby protecting against single node failure. (http://en.wikipedia.org/wiki/Hazelcast)", "properties": { "sw_database_hazlecast_version": { "title": "Version", "description": "The specific version of Hazlecast.", "type": "string" }, "sw_database_hazlecast_port": { "title": "Listen Port", "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Hazlecast is to listen for connections between members.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 5701 } } } ] } glance-12.0.0/etc/metadefs/compute-vmware-flavor.json0000664000567000056710000000277712701407047023703 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::VMwareFlavor", "display_name": "VMware Driver Options for Flavors", "description": "VMware Driver Options for Flavors may be used to customize and manage Nova Flavors. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. 
See: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "vmware:hw_version": { "title": "VMware Hardware Version", "description": "Specifies the hardware version VMware uses to create images. If the hardware version needs to be compatible with a cluster version, for backward compatibility or other circumstances, the vmware:hw_version key specifies a virtual machine hardware version. In the event that a cluster has mixed host version types, the key will enable the vCenter to place the cluster on the correct host.", "type": "string" }, "vmware:storage_policy": { "title": "VMware Storage Policy", "description": "Specifies the storage policy to be applied for newly created instance. If not provided, the default storage policy specified in config file will be used. If Storage Policy Based Management (SPBM) is not enabled in config file, this value won't be used.", "type": "string" } } } glance-12.0.0/etc/metadefs/compute-guest-shutdown.json0000664000567000056710000000166012701407047024101 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::GuestShutdownBehavior", "display_name": "Shutdown Behavior", "description": "These properties allow modifying the shutdown behavior for stop, rescue, resize, and shelve operations.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "os_shutdown_timeout": { "title": "Shutdown timeout", "description": "By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. 
A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up.", "type": "integer", "minimum": 0 } }, "objects": [] } glance-12.0.0/etc/metadefs/cim-resource-allocation-setting-data.json0000664000567000056710000001561312701407047026534 0ustar jenkinsjenkins00000000000000{ "namespace": "CIM::ResourceAllocationSettingData", "display_name": "CIM Resource Allocation Setting Data", "description": "Properties from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim) that represent settings specifically related to an allocated resource that are outside the scope of the CIM class typically used to represent the resource itself. These properties may be specified to volume, host aggregate and flavor. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ResourceAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_RASD_", "properties_target": "image" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_RASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_RASD_" } ], "properties": { "Address": { "title": "Address", "description": "The address of the resource.", "type": "string" }, "AddressOnParent": { "title": "Address On Parent", "description": "Describes the address of this resource in the context of the Parent.", "type": "string" }, "AllocationUnits": { "title": "Allocation Units", "description": "This property specifies the units of allocation used by the Reservation and Limit properties.", "type": "string" }, "AutomaticAllocation": { "title": "Automatic Allocation", "description": "This property specifies if the resource will be automatically allocated.", "type": "boolean" }, "AutomaticDeallocation": { "title": "Automatic Deallocation", "description": "This property specifies if the resource will be automatically de-allocated.", "type": "boolean" }, "ConsumerVisibility": { 
"title": "Consumer Visibility", "description": "Describes the consumers visibility to the allocated resource.", "operators": [""], "type": "string", "enum": [ "Unknown", "Passed-Through", "Virtualized", "Not represented", "DMTF reserved", "Vendor Reserved" ] }, "Limit": { "title": "Limit", "description": "This property specifies the upper bound, or maximum amount of resource that will be granted for this allocation.", "type": "string" }, "MappingBehavior": { "title": "Mapping Behavior", "description": "Specifies how this resource maps to underlying resources. If the HostResource array contains any entries, this property reflects how the resource maps to those specific resources.", "operators": [""], "type": "string", "enum": [ "Unknown", "Not Supported", "Dedicated", "Soft Affinity", "Hard Affinity", "DMTF Reserved", "Vendor Reserved" ] }, "OtherResourceType": { "title": "Other Resource Type", "description": "A string that describes the resource type when a well defined value is not available and ResourceType has the value 'Other'.", "type": "string" }, "Parent": { "title": "Parent", "description": "The Parent of the resource.", "type": "string" }, "PoolID": { "title": "Pool ID", "description": "This property specifies which ResourcePool the resource is currently allocated from, or which ResourcePool the resource will be allocated from when the allocation occurs.", "type": "string" }, "Reservation": { "title": "Reservation", "description": "This property specifies the amount of resource guaranteed to be available for this allocation.", "type": "string" }, "ResourceSubType": { "title": "Resource Sub Type", "description": "A string describing an implementation specific sub-type for this resource.", "type": "string" }, "ResourceType": { "title": "Resource Type", "description": "The type of resource this allocation setting represents.", "operators": [""], "type": "string", "enum": [ "Other", "Computer System", "Processor", "Memory", "IDE Controller", "Parallel SCSI 
HBA", "FC HBA", "iSCSI HBA", "IB HCA", "Ethernet Adapter", "Other Network Adapter", "I/O Slot", "I/O Device", "Floppy Drive", "CD Drive", "DVD drive", "Disk Drive", "Tape Drive", "Storage Extent", "Other storage device", "Serial port", "Parallel port", "USB Controller", "Graphics controller", "IEEE 1394 Controller", "Partitionable Unit", "Base Partitionable Unit", "Power", "Cooling Capacity", "Ethernet Switch Port", "Logical Disk", "Storage Volume", "Ethernet Connection", "DMTF reserved", "Vendor Reserved" ] }, "VirtualQuantity": { "title": "Virtual Quantity", "description": "This property specifies the quantity of resources presented to the consumer.", "type": "string" }, "VirtualQuantityUnits": { "title": "Virtual Quantity Units", "description": "This property specifies the units used by the VirtualQuantity property.", "type": "string" }, "Weight": { "title": "Weight", "description": "This property specifies a relative priority for this allocation in relation to other allocations from the same ResourcePool.", "type": "string" }, "Connection": { "title": "Connection", "description": "The thing to which this resource is connected.", "type": "string" }, "HostResource": { "title": "Host Resource", "description": "This property exposes specific assignment of resources.", "type": "string" } }, "objects": [] } glance-12.0.0/etc/metadefs/software-runtimes.json0000664000567000056710000001220412701407047023121 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Software::Runtimes", "display_name": "Runtime Environment", "description": "Software is written in a specific programming language and the language must execute within a runtime environment. 
The runtime environment provides an abstraction to utilizing a computer's processor, memory (RAM), and other system resources.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" } ], "objects": [ { "name": "PHP", "description": "PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. After the PHP code is interpreted and executed, the web server sends resulting output to its client, usually in form of a part of the generated web page – for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications. (http://en.wikipedia.org/wiki/PHP)", "properties": { "sw_runtime_php_version": { "title": "Version", "description": "The specific version of PHP.", "type": "string" } } }, { "name": "Python", "description": "Python is a widely used general-purpose, high-level programming language. Its design philosophy emphasizes code readability, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java. The language provides constructs intended to enable clear programs on both a small and large scale. Python supports multiple programming paradigms, including object-oriented, imperative and functional programming or procedural styles. 
It features a dynamic type system and automatic memory management and has a large and comprehensive standard library. (http://en.wikipedia.org/wiki/Python_(programming_language))", "properties": { "sw_runtime_python_version": { "title": "Version", "description": "The specific version of python.", "type": "string" } } }, { "name": "Java", "description": "Java is a functional computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. It is intended to let application developers write once, run anywhere (WORA), meaning that code that runs on one platform does not need to be recompiled to run on another. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of computer architecture. (http://en.wikipedia.org/wiki/Java_(programming_language))", "properties": { "sw_runtime_java_version": { "title": "Version", "description": "The specific version of Java.", "type": "string" } } }, { "name": "Ruby", "description": "Ruby is a dynamic, reflective, object-oriented, general-purpose programming language. It was designed and developed in the mid-1990s by Yukihiro Matsumoto in Japan. According to its authors, Ruby was influenced by Perl, Smalltalk, Eiffel, Ada, and Lisp. It supports multiple programming paradigms, including functional, object-oriented, and imperative. It also has a dynamic type system and automatic memory management. (http://en.wikipedia.org/wiki/Python_(programming_language))", "properties": { "sw_runtime_ruby_version": { "title": "Version", "description": "The specific version of Ruby.", "type": "string" } } }, { "name": "Perl", "description": "Perl is a family of high-level, general-purpose, interpreted, dynamic programming languages. The languages in this family include Perl 5 and Perl 6. 
Though Perl is not officially an acronym, there are various backronyms in use, the most well-known being Practical Extraction and Reporting Language (http://en.wikipedia.org/wiki/Perl)", "properties": { "sw_runtime_perl_version": { "title": "Version", "description": "The specific version of Perl.", "type": "string" } } } ] } glance-12.0.0/etc/metadefs/compute-vmware-quota-flavor.json0000664000567000056710000000316212701407047025017 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::VMwareQuotaFlavor", "display_name": "VMware Quota for Flavors", "description": "The VMware compute driver allows various compute quotas to be specified on flavors. When specified, the VMWare driver will ensure that the quota is enforced. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of hypervisors, see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix. For flavor customization, see: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "quota:cpu_limit": { "title": "Quota: CPU Limit", "description": "Specifies the upper limit for CPU allocation in MHz. This parameter ensures that a machine never uses more than the defined amount of CPU time. It can be used to enforce a limit on the machine's CPU performance. The value should be a numerical value in MHz. If zero is supplied then the cpu_limit is unlimited.", "type": "integer", "minimum": 0 }, "quota:cpu_reservation": { "title": "Quota: CPU Reservation Limit", "description": "Specifies the guaranteed minimum CPU reservation in MHz. This means that if needed, the machine will definitely get allocated the reserved amount of CPU cycles. 
The value should be a numerical value in MHz.", "type": "integer", "minimum": 0 } } } glance-12.0.0/etc/metadefs/compute-instance-data.json0000664000567000056710000000350512701407047023614 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::InstanceData", "display_name": "Instance Config Data", "description": "Instances can perform self-configuration based on data made available to the running instance. These properties affect instance configuration.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" } ], "properties": { "img_config_drive": { "title": "Config Drive", "description": "This property specifies whether or not Nova should use a config drive when booting the image. Mandatory means that Nova will always use a config drive when booting the image. OpenStack can be configured to write metadata to a special configuration drive that will be attached to the instance when it boots. The instance can retrieve any information from the config drive. One use case for the config drive is to pass network configuration information to the instance. See also: http://docs.openstack.org/user-guide/cli_config_drive.html", "type": "string", "enum": [ "optional", "mandatory" ] }, "os_require_quiesce": { "title": "Require Quiescent File system", "description": "This property specifies whether or not the filesystem must be quiesced during snapshot processing. 
For volume backed and image backed snapshots, yes means that snapshotting is aborted when quiescing fails, whereas, no means quiescing will be skipped and snapshot processing will continue after the quiesce failure.", "type": "string", "enum": [ "yes", "no" ] } } } glance-12.0.0/etc/metadefs/cim-processor-allocation-setting-data.json0000664000567000056710000001210512701407047026715 0ustar jenkinsjenkins00000000000000{ "namespace": "CIM::ProcessorAllocationSettingData", "display_name": "CIM Processor Allocation Setting", "description": "Properties related to the resource allocation settings of a processor (CPU) from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These are properties that identify processor setting data and may be specified to volume, image, host aggregate, flavor and Nova server as scheduler hint. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_ProcessorAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_PASD_", "properties_target": "image" }, { "name": "OS::Glance::Image", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_PASD_" }, { "name": "OS::Nova::Server", "properties_target": "scheduler_hint" } ], "properties": { "InstructionSet": { "title": "Instruction Set", "description": "Identifies the instruction set of the processor within a processor architecture.", "operators": [""], "type": "string", "enum": [ "x86:i386", "x86:i486", "x86:i586", "x86:i686", "x86:64", "IA-64:IA-64", "AS/400:TIMI", "Power:Power_2.03", "Power:Power_2.04", "Power:Power_2.05", "Power:Power_2.06", "S/390:ESA/390", "S/390:z/Architecture", "S/390:z/Architecture_2", "PA-RISC:PA-RISC_1.0", "PA-RISC:PA-RISC_2.0", "ARM:A32", "ARM:A64", "MIPS:MIPS_I", "MIPS:MIPS_II", "MIPS:MIPS_III", "MIPS:MIPS_IV", "MIPS:MIPS_V", "MIPS:MIPS32", 
"MIPS64:MIPS64", "Alpha:Alpha", "SPARC:SPARC_V7", "SPARC:SPARC_V8", "SPARC:SPARC_V9", "SPARC:SPARC_JPS1", "SPARC:UltraSPARC2005", "SPARC:UltraSPARC2007", "68k:68000", "68k:68010", "68k:68020", "68k:68030", "68k:68040", "68k:68060" ] }, "ProcessorArchitecture": { "title": "Processor Architecture", "description": "Identifies the processor architecture of the processor.", "operators": [""], "type": "string", "enum": [ "x86", "IA-64", "AS/400", "Power", "S/390", "PA-RISC", "ARM", "MIPS", "Alpha", "SPARC", "68k" ] }, "InstructionSetExtensionName": { "title": "Instruction Set Extension", "description": "Identifies the instruction set extensions of the processor within a processor architecture.", "operators": ["", ""], "type": "array", "items": { "type": "string", "enum": [ "x86:3DNow", "x86:3DNowExt", "x86:ABM", "x86:AES", "x86:AVX", "x86:AVX2", "x86:BMI", "x86:CX16", "x86:F16C", "x86:FSGSBASE", "x86:LWP", "x86:MMX", "x86:PCLMUL", "x86:RDRND", "x86:SSE2", "x86:SSE3", "x86:SSSE3", "x86:SSE4A", "x86:SSE41", "x86:SSE42", "x86:FMA3", "x86:FMA4", "x86:XOP", "x86:TBM", "x86:VT-d", "x86:VT-x", "x86:EPT", "x86:SVM", "PA-RISC:MAX", "PA-RISC:MAX2", "ARM:DSP", "ARM:Jazelle-DBX", "ARM:Thumb", "ARM:Thumb-2", "ARM:ThumbEE)", "ARM:VFP", "ARM:NEON", "ARM:TrustZone", "MIPS:MDMX", "MIPS:MIPS-3D", "Alpha:BWX", "Alpha:FIX", "Alpha:CIX", "Alpha:MVI" ] } } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-guest-memory-backing.json0000664000567000056710000000217112701407047025130 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::GuestMemoryBacking", "display_name": "Guest Memory Backing", "description": "This provides the preferred backing option for guest RAM. Guest's memory can be backed by hugepages to limit TLB lookups. 
See also: https://wiki.openstack.org/wiki/VirtDriverGuestCPUMemoryPlacement", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "hw:" }, { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" } ], "properties": { "mem_page_size": { "title": "Size of memory page", "description": "Page size to be used for Guest memory backing. Value can be specified as (i.e.: 2MB, 1GB) or 'any', 'small', 'large'. If this property is set in Image metadata then only 'any' and 'large' values are accepted in Flavor metadata by Nova API.", "type": "string" } } }glance-12.0.0/etc/metadefs/compute-aggr-iops-filter.json0000664000567000056710000000202312701407047024246 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::AggregateIoOpsFilter", "display_name": "IO Ops per Host", "description": "Properties related to the Nova scheduler filter AggregateIoOpsFilter. Filters aggregate hosts based on the number of instances currently changing state. Hosts in the aggregate with too many instances changing state will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "max_io_ops_per_host": { "title": "Maximum IO Operations per Host", "description": "Prevents hosts in the aggregate that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve to be scheduled for new instances.", "type": "integer", "readonly": false, "default": 8, "minimum": 1 } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-vmware.json0000664000567000056710000001560412701407047022405 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::VMware", "display_name": "VMware Driver Options", "description": "The VMware compute driver options. 
\n\nThese are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "vmware_adaptertype": { "title": "Disk Adapter Type", "description": "The virtual SCSI or IDE controller used by the hypervisor.", "type": "string", "enum": [ "lsiLogic", "lsiLogicsas", "paraVirtual", "busLogic", "ide" ], "default" : "lsiLogic" }, "vmware_disktype": { "title": "Disk Provisioning Type", "description": "When performing operations such as creating a virtual disk, cloning, or migrating, the disk provisioning type may be specified. Please refer to VMware documentation for more.", "type": "string", "enum": [ "streamOptimized", "sparse", "preallocated" ], "default" : "preallocated" }, "vmware_ostype": { "title": "OS Type", "description": "A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. If not specified, the key defaults to otherGuest. 
See thinkvirt.com.", "type": "string", "enum": [ "asianux3_64Guest", "asianux3Guest", "asianux4_64Guest", "asianux4Guest", "asianux5_64Guest", "centos64Guest", "centosGuest", "coreos64Guest", "darwin10_64Guest", "darwin10Guest", "darwin11_64Guest", "darwin11Guest", "darwin12_64Guest", "darwin13_64Guest", "darwin14_64Guest", "darwin64Guest", "darwinGuest", "debian4_64Guest", "debian4Guest", "debian5_64Guest", "debian5Guest", "debian6_64Guest", "debian6Guest", "debian7_64Guest", "debian7Guest", "debian8_64Guest", "debian8Guest", "dosGuest", "eComStation2Guest", "eComStationGuest", "fedora64Guest", "fedoraGuest", "freebsd64Guest", "freebsdGuest", "genericLinuxGuest", "mandrakeGuest", "mandriva64Guest", "mandrivaGuest", "netware4Guest", "netware5Guest", "netware6Guest", "nld9Guest", "oesGuest", "openServer5Guest", "openServer6Guest", "opensuse64Guest", "opensuseGuest", "oracleLinux64Guest", "oracleLinuxGuest", "os2Guest", "other24xLinux64Guest", "other24xLinuxGuest", "other26xLinux64Guest", "other26xLinuxGuest", "other3xLinux64Guest", "other3xLinuxGuest", "otherGuest", "otherGuest64", "otherLinux64Guest", "otherLinuxGuest", "redhatGuest", "rhel2Guest", "rhel3_64Guest", "rhel3Guest", "rhel4_64Guest", "rhel4Guest", "rhel5_64Guest", "rhel5Guest", "rhel6_64Guest", "rhel6Guest", "rhel7_64Guest", "rhel7Guest", "sjdsGuest", "sles10_64Guest", "sles10Guest", "sles11_64Guest", "sles11Guest", "sles12_64Guest", "sles12Guest", "sles64Guest", "slesGuest", "solaris10_64Guest", "solaris10Guest", "solaris11_64Guest", "solaris6Guest", "solaris7Guest", "solaris8Guest", "solaris9Guest", "turboLinux64Guest", "turboLinuxGuest", "ubuntu64Guest", "ubuntuGuest", "unixWare7Guest", "vmkernel5Guest", "vmkernel6Guest", "vmkernelGuest", "win2000AdvServGuest", "win2000ProGuest", "win2000ServGuest", "win31Guest", "win95Guest", "win98Guest", "windows7_64Guest", "windows7Guest", "windows7Server64Guest", "windows8_64Guest", "windows8Guest", "windows8Server64Guest", "windows9_64Guest", "windows9Guest", 
"windows9Server64Guest", "windowsHyperVGuest", "winLonghorn64Guest", "winLonghornGuest", "winMeGuest", "winNetBusinessGuest", "winNetDatacenter64Guest", "winNetDatacenterGuest", "winNetEnterprise64Guest", "winNetEnterpriseGuest", "winNetStandard64Guest", "winNetStandardGuest", "winNetWebGuest", "winNTGuest", "winVista64Guest", "winVistaGuest", "winXPHomeGuest", "winXPPro64Guest", "winXPProGuest" ], "default": "otherGuest" }, "hw_vif_model": { "title": "Virtual Network Interface", "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor. VMware driver supported options: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, VirtualSriovEthernetCard, and VirtualVmxnet.", "type": "string", "enum": [ "e1000", "e1000e", "VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualSriovEthernetCard", "VirtualVmxnet", "VirtualVmxnet3" ], "default" : "e1000" } }, "objects": [] } glance-12.0.0/etc/metadefs/glance-common-image-props.json0000664000567000056710000000436112701407047024370 0ustar jenkinsjenkins00000000000000{ "display_name": "Common Image Properties", "namespace": "OS::Glance::CommonImageProperties", "description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.", "protected": true, "resource_type_associations" : [ ], "properties": { "kernel_id": { "title": "Kernel ID", "type": "string", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." }, "ramdisk_id": { "title": "Ramdisk ID", "type": "string", "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." 
}, "instance_uuid": { "title": "Instance ID", "type": "string", "description": "Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.)" }, "architecture": { "title": "CPU Architecture", "description": "The CPU architecture that must be supported by the hypervisor. For example, x86_64, arm, or ppc64. Run uname -m to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the libosinfo project for this purpose.", "type": "string" }, "os_distro": { "title": "OS Distro", "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", "type": "string" }, "os_version": { "title": "OS Version", "description": "Operating system version as specified by the distributor. (for example, '11.10')", "type": "string" } } } glance-12.0.0/etc/metadefs/compute-xenapi.json0000664000567000056710000000302712701407047022364 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::XenAPI", "display_name": "XenAPI Driver Options", "description": "The XenAPI compute driver options. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "os_type": { "title": "OS Type", "description": "The operating system installed on the image. The XenAPI driver contains logic that takes different actions depending on the value of the os_type parameter of the image. 
For example, for os_type=windows images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters.", "type": "string", "enum": [ "linux", "windows" ] }, "auto_disk_config": { "title": "Disk Adapter Type", "description": "If true, the root partition on the disk is automatically resized before the instance boots. This value is only taken into account by the Compute service when using a Xen-based hypervisor with the XenAPI driver. The Compute service will only attempt to resize if there is a single partition on the image, and only if the partition is in ext3 or ext4 format.", "type": "boolean" } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-watchdog.json0000664000567000056710000000251012701407047022674 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::Watchdog", "display_name": "Watchdog Behavior", "description": "Compute drivers may enable watchdog behavior over instances. See: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Flavor" } ], "properties": { "hw_watchdog_action": { "title": "Watchdog Action", "description": "For the libvirt driver, you can enable and set the behavior of a virtual hardware watchdog device for each flavor. Watchdog devices keep an eye on the guest server, and carry out the configured action, if the server hangs. The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If hw_watchdog_action is not specified, the watchdog is disabled. 
Watchdog behavior set using a specific image's properties will override behavior set using flavors.", "type": "string", "enum": [ "disabled", "reset", "poweroff", "pause", "none" ] } } } glance-12.0.0/etc/metadefs/storage-volume-type.json0000664000567000056710000000211412701407047023352 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Cinder::Volumetype", "display_name": "Cinder Volume Type", "description": "The Cinder volume type configuration option. Volume type assignment provides a mechanism not only to provide scheduling to a specific storage back-end, but also can be used to specify specific information for a back-end storage device to act upon.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "cinder_" } ], "properties": { "img_volume_type": { "title": "Image Volume Type", "description": "Specifies the volume type that should be applied during new volume creation with a image. This value is passed to Cinder when creating a new volume. Priority of volume type related parameters are 1.volume_type(via API or CLI), 2.cinder_img_volume_type, 3.default_volume_type(via cinder.conf). 
If not specified, volume_type or default_volume_type will be referred based on their priority.", "type": "string" } } } glance-12.0.0/etc/metadefs/operating-system.json0000664000567000056710000000244212701407047022740 0ustar jenkinsjenkins00000000000000{ "display_name": "Common Operating System Properties", "namespace": "OS::OperatingSystem", "description": "Details of the operating system contained within this image as well as common operating system properties that can be set on a VM instance created from this image.", "protected": true, "resource_type_associations" : [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" } ], "properties": { "os_distro": { "title": "OS Distro", "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", "type": "string" }, "os_version": { "title": "OS Version", "description": "Operating system version as specified by the distributor. (for example, '11.10')", "type": "string" }, "os_admin_user": { "title": "OS Admin User", "description": "The name of the user with admin privileges.", "type": "string" } } } glance-12.0.0/etc/metadefs/compute-libvirt-image.json0000664000567000056710000001070612701407047023635 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::LibvirtImage", "display_name": "libvirt Driver Options for Images", "description": "The libvirt Compute Driver Options for Glance Images. \n\nThese are properties specific to compute drivers. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "hw_disk_bus": { "title": "Disk Bus", "description": "Specifies the type of disk controller to attach disk devices to.", "type": "string", "enum": [ "scsi", "virtio", "uml", "xen", "ide", "usb" ] }, "hw_rng_model": { "title": "Random Number Generator Device", "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source. To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", "type": "string", "default": "virtio" }, "hw_machine_type": { "title": "Machine Type", "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", "type": "string" }, "hw_scsi_model": { "title": "SCSI Model", "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", "type": "string", "default": "virtio-scsi" }, "hw_video_model": { "title": "Video Model", "description": "The video image driver used.", "type": "string", "enum": [ "vga", "cirrus", "vmvga", "xen", "qxl" ] }, "hw_video_ram": { "title": "Max Video Ram", "description": "Maximum RAM (unit: MB) for the video image. 
Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", "type": "integer", "minimum": 0 }, "os_command_line": { "title": "Kernel Command Line", "description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", "type": "string" }, "hw_vif_model": { "title": "Virtual Network Interface", "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor configuration. libvirt driver options: KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, spapr-vlan, and virtio. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", "type": "string", "enum": [ "e1000", "e1000e", "ne2k_pci", "netfront", "pcnet", "rtl8139", "spapr-vlan", "virtio" ] }, "hw_qemu_guest_agent": { "title": "QEMU Guest Agent", "description": "This is a background process which helps management applications execute guest OS level commands. For example, freezing and thawing filesystems, entering suspend. However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", "type": "string", "enum": ["yes", "no"] } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-hypervisor.json0000664000567000056710000000413212701407047023310 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::Hypervisor", "display_name": "Hypervisor Selection", "description": "OpenStack Compute supports many hypervisors, although most installations use only one hypervisor. For installations with multiple supported hypervisors, you can schedule different hypervisors using the ImagePropertiesFilter. 
This filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" } ], "properties": { "hypervisor_type": { "title": "Hypervisor Type", "description": "Hypervisor type required by the image. Used with the ImagePropertiesFilter. \n\n KVM - Kernel-based Virtual Machine. LXC - Linux Containers (through libvirt). QEMU - Quick EMUlator. UML - User Mode Linux. hyperv - Microsoft® hyperv. vmware - VMware® vsphere. Baremetal - physical provisioning. For more information, see: http://docs.openstack.org/trunk/config-reference/content/section_compute-hypervisors.html", "type": "string", "enum": [ "baremetal", "hyperv", "kvm", "lxc", "qemu", "uml", "vmware", "xen" ] }, "vm_mode": { "title": "VM Mode", "description": "The virtual machine mode. This represents the host/guest ABI (application binary interface) used for the virtual machine. Used with the ImagePropertiesFilter. \n\n hvm — Fully virtualized - This is the virtual machine mode (vm_mode) used by QEMU and KVM. \n\n xen - Xen 3.0 paravirtualized. \n\n uml — User Mode Linux paravirtualized. \n\n exe — Executables in containers. This is the mode used by LXC.", "type": "string", "enum": [ "hvm", "xen", "uml", "exe" ] } }, "objects": [] } glance-12.0.0/etc/metadefs/cim-virtual-system-setting-data.json0000664000567000056710000001216712701407047025573 0ustar jenkinsjenkins00000000000000{ "namespace": "CIM::VirtualSystemSettingData", "display_name": "CIM Virtual System Setting Data", "description": "A set of virtualization specific properties from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim), which define the virtual aspects of a virtual system. These properties may be specified to host aggregate and flavor. 
For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_VirtualSystemSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate", "prefix": "CIM_VSSD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_VSSD_" } ], "properties": { "AutomaticRecoveryAction": { "title": "Automatic Recovery Action", "description": "Action to take for the virtual system when the software executed by the virtual system fails.", "operators": [""], "type": "string", "enum": [ "None", "Restart", "Revert to snapshot", "DMTF Reserved" ] }, "AutomaticShutdownAction": { "title": "Automatic Shutdown Action", "description": "Action to take for the virtual system when the host is shut down.", "operators": [""], "type": "string", "enum": [ "Turn Off", "Save state", "Shutdown", "DMTF Reserved" ] }, "AutomaticStartupAction": { "title": "Automatic Startup Action", "description": "Action to take for the virtual system when the host is started.", "operators": [""], "type": "string", "enum": [ "None", "Restart if previously active", "Always startup", "DMTF Reserved" ] }, "AutomaticStartupActionDelay": { "title": "Automatic Startup Action Delay", "description": "Delay applicable to startup action.", "type": "string" }, "AutomaticStartupActionSequenceNumber": { "title": "Automatic Startup Action Sequence Number", "description": "Number indicating the relative sequence of virtual system activation when the host system is started.", "type": "string" }, "ConfigurationDataRoot": { "title": "Configuration Data Root", "description": "Filepath of a directory where information about the virtual system configuration is stored.", "type": "string" }, "ConfigurationFile": { "title": "Configuration File", "description": "Filepath of a file where information about the virtual system configuration is stored.", "type": "string" }, "ConfigurationID": { "title": "Configuration ID", "description": "Unique id of the virtual system 
configuration.", "type": "string" }, "CreationTime": { "title": "Creation Time", "description": "Time when the virtual system configuration was created.", "type": "string" }, "LogDataRoot": { "title": "Log Data Root", "description": "Filepath of a directory where log information about the virtual system is stored.", "type": "string" }, "RecoveryFile": { "title": "Recovery File", "description": "Filepath of a file where recovery relateded information of the virtual system is stored.", "type": "string" }, "SnapshotDataRoot": { "title": "Snapshot Data Root", "description": "Filepath of a directory where information about virtual system snapshots is stored.", "type": "string" }, "SuspendDataRoot": { "title": "Suspend Data Root", "description": "Filepath of a directory where suspend related information about the virtual system is stored.", "type": "string" }, "SwapFileDataRoot": { "title": "Swap File Data Root", "description": "Filepath of a directory where swapfiles of the virtual system are stored.", "type": "string" }, "VirtualSystemIdentifier": { "title": "Virtual System Identifier", "description": "VirtualSystemIdentifier shall reflect a unique name for the system as it is used within the virtualization platform.", "type": "string" }, "VirtualSystemType": { "title": "Virtual System Type", "description": "VirtualSystemType shall reflect a particular type of virtual system.", "type": "string" }, "Notes": { "title": "Notes", "description": "End-user supplied notes that are related to the virtual system.", "type": "string" } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-vcputopology.json0000664000567000056710000000370212701407047023652 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::VirtCPUTopology", "display_name": "Virtual CPU Topology", "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. 
This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. See also: http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-vcpu-topology.rst", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "cpu_sockets": { "title": "vCPU Sockets", "description": "Preferred number of sockets to expose to the guest.", "type": "integer" }, "cpu_cores": { "title": "vCPU Cores", "description": "Preferred number of cores to expose to the guest.", "type": "integer" }, "cpu_threads": { "title": " vCPU Threads", "description": "Preferred number of threads to expose to the guest.", "type": "integer" }, "cpu_maxsockets": { "title": "Max vCPU Sockets", "description": "Maximum number of sockets to expose to the guest.", "type": "integer" }, "cpu_maxcores": { "title": "Max vCPU Cores", "description": "Maximum number of cores to expose to the guest.", "type": "integer" }, "cpu_maxthreads": { "title": "Max vCPU Threads", "description": "Maximum number of threads to expose to the guest.", "type": "integer" } } } glance-12.0.0/etc/metadefs/compute-aggr-num-instances.json0000664000567000056710000000160112701407047024576 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::AggregateNumInstancesFilter", "display_name": "Instances per Host", "description": "Properties related to the Nova scheduler filter AggregateNumInstancesFilter. Filters aggregate hosts by the number of running instances on it. Hosts in the aggregate with too many instances will be filtered out. 
The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": false, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "max_instances_per_host": { "title": "Max Instances Per Host", "description": "Maximum number of instances allowed to run on a host in the aggregate.", "type": "integer", "readonly": false, "minimum": 0 } }, "objects": [] } glance-12.0.0/etc/metadefs/compute-host-capabilities.json0000664000567000056710000002202412701407047024502 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::HostCapabilities", "display_name": "Compute Host Capabilities", "description": "Hardware capabilities provided by the compute host. This provides the ability to fine tune the hardware specification required when an instance is requested. The ComputeCapabilitiesFilter should be enabled in the Nova scheduler to use these properties. When enabled, this filter checks that the capabilities provided by the compute host satisfy any extra specifications requested. Only hosts that can provide the requested capabilities will be eligible for hosting the instance.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor", "prefix": "capabilities:" }, { "name": "OS::Nova::Aggregate", "prefix": "aggregate_instance_extra_specs:" } ], "properties": { "cpu_info:vendor": { "title": "Vendor", "description": "Specifies the CPU manufacturer.", "operators": [""], "type": "string", "enum": [ "Intel", "AMD" ] }, "cpu_info:model": { "title": "Model", "description": "Specifies the CPU model. 
Use this property to ensure that your vm runs on a a specific cpu model.", "operators": [""], "type": "string", "enum": [ "Conroe", "Core2Duo", "Penryn", "Nehalem", "Westmere", "SandyBridge", "IvyBridge", "Haswell", "Broadwell", "Delhi", "Seoul", "Abu Dhabi", "Interlagos", "Kabini", "Valencia", "Zurich", "Budapest", "Barcelona", "Suzuka", "Shanghai", "Istanbul", "Lisbon", "Magny-Cours", "Valencia", "Cortex-A57", "Cortex-A53", "Cortex-A12", "Cortex-A17", "Cortex-A15", "Coretx-A7", "X-Gene" ] }, "cpu_info:arch": { "title": "Architecture", "description": "Specifies the CPU architecture. Use this property to specify the architecture supported by the hypervisor.", "operators": [""], "type": "string", "enum": [ "x86", "x86_64", "i686", "ia64", "ARMv8-A", "ARMv7-A" ] }, "cpu_info:topology:cores": { "title": "cores", "description": "Number of cores.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:topology:threads": { "title": "threads", "description": "Number of threads.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:topology:sockets": { "title": "sockets", "description": "Number of sockets.", "type": "integer", "readonly": false, "default": 1 }, "cpu_info:features": { "title": "Features", "description": "Specifies CPU flags/features. 
Using this property you can specify the required set of instructions supported by a vm.", "operators": ["", ""], "type": "array", "items": { "type": "string", "enum": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "pn", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", "syscall", "mp", "nx", "mmxext", "fxsr_opt", "pdpe1gb", "rdtscp", "lm", "3dnowext", "3dnow", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "tsc_reliable", "nonstop_tsc", "extd_apicid", "amd_dcm", "aperfmperf", "eagerfpu", "nonstop_tsc_s3", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "cid", "fma", "cx16", "xtpr", "pdcm", "pcid", "dca", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "hypervisor", "rng", "rng_en", "ace", "ace_en", "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy", "abm", "sse4a", "misalignsse", "3dnowprefetch", "osvw", "ibs", "xop", "skinit", "wdt", "lwp", "fma4", "tce", "nodeid_msr", "tbm", "topoext", "perfctr_core", "perfctr_nb", "bpext", "perfctr_l2", "mwaitx", "ida", "arat", "cpb", "epb", "pln", "pts", "dtherm", "hw_pstate", "proc_feedback", "hwp", "hwp_notify", "hwp_act_window", "hwp_epp", "hwp_pkg_req", "intel_pt", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "npt", "lbrv", "svm_lock", "nrip_save", "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists", "pausefilter", "pfthreshold", "vmmcall", "fsgsbase", "tsc_adjust", "bmi1", "hle", "avx2", "smep", "bmi2", "erms", "invpcid", "rtm", "cqm", "mpx", "avx512f", "rdseed", "adx", "smap", "pcommit", "clflushopt", "clwb", "avx512pf", "avx512er", "avx512cd", "sha_ni", "xsaveopt", "xsavec", "xgetbv1", "xsaves", "cqm_llc", "cqm_occup_llc", "clzero" ] } } }, "objects": [] } 
glance-12.0.0/etc/metadefs/cim-storage-allocation-setting-data.json0000664000567000056710000001206012701407047026342 0ustar jenkinsjenkins00000000000000{ "namespace": "CIM::StorageAllocationSettingData", "display_name": "CIM Storage Allocation Setting Data", "description": "Properties related to the allocation of virtual storage from Common Information Model (CIM) schema (http://www.dmtf.org/standards/cim). These properties may be specified to volume, host aggregate and flavor. For each property details, please refer to http://schemas.dmtf.org/wbem/cim-html/2/CIM_StorageAllocationSettingData.html.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Cinder::Volume", "prefix": "CIM_SASD_" }, { "name": "OS::Nova::Aggregate", "prefix": "CIM_SASD_" }, { "name": "OS::Nova::Flavor", "prefix": "CIM_SASD_" } ], "properties": { "Access": { "title": "Access", "description": "Access describes whether the allocated storage extent is 1 (readable), 2 (writeable), or 3 (both).", "operators": [""], "type": "string", "enum": [ "Unknown", "Readable", "Writeable", "Read/Write Supported", "DMTF Reserved" ] }, "HostExtentName": { "title": "Host Extent Name", "description": "A unique identifier for the host extent.", "type": "string" }, "HostExtentNameFormat": { "title": "Host Extent Name Format", "description": "The HostExtentNameFormat property identifies the format that is used for the value of the HostExtentName property.", "operators": [""], "type": "string", "enum": [ "Unknown", "Other", "SNVM", "NAA", "EUI64", "T10VID", "OS Device Name", "DMTF Reserved" ] }, "HostExtentNameNamespace": { "title": "Host Extent Name Namespace", "description": "If the host extent is a SCSI volume, then the preferred source for SCSI volume names is SCSI VPD Page 83 responses.", "operators": [""], "type": "string", "enum": [ "Unknown", "Other", "VPD83Type3", "VPD83Type2", "VPD83Type1", "VPD80", "NodeWWN", "SNVM", "OS Device Namespace", "DMTF Reserved" ] }, 
"HostExtentStartingAddress": { "title": "Host Extent Starting Address", "description": "The HostExtentStartingAddress property identifies the starting address on the host storage extent identified by the value of the HostExtentName property that is used for the allocation of the virtual storage extent.", "type": "string" }, "HostResourceBlockSize": { "title": "Host Resource Block Size", "description": "Size in bytes of the blocks that are allocated at the host as the result of this storage resource allocation or storage resource allocation request.", "type": "string" }, "Limit": { "title": "Limit", "description": "The maximum amount of blocks that will be granted for this storage resource allocation at the host.", "type": "string" }, "OtherHostExtentNameFormat": { "title": "Other Host Extent Name Format", "description": "A string describing the format of the HostExtentName property if the value of the HostExtentNameFormat property is 1 (Other).", "type": "string" }, "OtherHostExtentNameNamespace": { "title": "Other Host Extent Name Namespace", "description": "A string describing the namespace of the HostExtentName property if the value of the HostExtentNameNamespace matches 1 (Other).", "type": "string" }, "Reservation": { "title": "Reservation", "description": "The amount of blocks that are guaranteed to be available for this storage resource allocation at the host.", "type": "string" }, "VirtualQuantity": { "title": "Virtual Quantity", "description": "Number of blocks that are presented to the consumer.", "type": "string" }, "VirtualQuantityUnits": { "title": "Virtual Quantity Units", "description": "This property specifies the units used by the VirtualQuantity property.", "type": "string" }, "VirtualResourceBlockSize": { "title": "Virtual Resource Block Size", "description": "Size in bytes of the blocks that are presented to the consumer as the result of this storage resource allocation or storage resource allocation request.", "type": "string" } }, "objects": 
[] } glance-12.0.0/etc/metadefs/compute-randomgen.json0000664000567000056710000000202612701407047023050 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::RandomNumberGenerator", "display_name": "Random Number Generator", "description": "If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "properties": { "hw_rng:allowed": { "title": "Random Number Generator Allowed", "description": "", "type": "boolean" }, "hw_rng:rate_bytes": { "title": "Random number generator limits.", "description": "Allowed amount of bytes that the guest can read from the host's entropy per period.", "type": "integer" }, "hw_rng:rate_period": { "title": "Random number generator read period.", "description": "Duration of the read period in seconds.", "type": "integer" } } }glance-12.0.0/etc/metadefs/compute-aggr-disk-filter.json0000664000567000056710000000210212701407047024224 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::AggregateDiskFilter", "display_name": "Disk Allocation per Host", "description": "Properties related to the Nova scheduler filter AggregateDiskFilter. Filters aggregate hosts based on the available disk space compared to the requested disk space. Hosts in the aggregate with not enough usable disk will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Aggregate" } ], "properties": { "disk_allocation_ratio": { "title": "Disk Subscription Ratio", "description": "Allows the host to be under and over subscribed for the amount of disk space requested for an instance. A ratio greater than 1.0 allows for over subscription (hosts may have less usable disk space than requested). 
A ratio less than 1.0 allows for under subscription.", "type": "number", "readonly": false } }, "objects": [] } glance-12.0.0/etc/metadefs/software-webservers.json0000664000567000056710000001321412701407047023444 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Software::WebServers", "display_name": "Web Servers", "description": "A web server is a computer system that processes requests via HTTP, the basic network protocol used to distribute information on the World Wide Web. The most common use of web servers is to host websites, but there are other uses such as gaming, data storage, running enterprise applications, handling email, FTP, or other web uses. (http://en.wikipedia.org/wiki/Web_server)", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image" }, { "name": "OS::Cinder::Volume", "properties_target": "image" }, { "name": "OS::Nova::Server", "properties_target": "metadata" } ], "objects": [ { "name": "Apache HTTP Server", "description": "The Apache HTTP Server, colloquially called Apache, is a Web server application notable for playing a key role in the initial growth of the World Wide Web. Apache is developed and maintained by an open community of developers under the auspices of the Apache Software Foundation. Most commonly used on a Unix-like system, the software is available for a wide variety of operating systems, including Unix, FreeBSD, Linux, Solaris, Novell NetWare, OS X, Microsoft Windows, OS/2, TPF, OpenVMS and eComStation. Released under the Apache License, Apache is open-source software. 
(http://en.wikipedia.org/wiki/Apache_HTTP_Server)", "properties": { "sw_webserver_apache_version": { "title": "Version", "description": "The specific version of Apache.", "type": "string" }, "sw_webserver_apache_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_apache_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } }, { "name": "Nginx", "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows. 
(http://en.wikipedia.org/wiki/Nginx)", "properties": { "sw_webserver_nginx_version": { "title": "Version", "description": "The specific version of Nginx.", "type": "string" }, "sw_webserver_nginx_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_nginx_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } }, { "name": "IIS", "description": "Internet Information Services (IIS, formerly Internet Information Server) is an extensible web server created by Microsoft. IIS supports HTTP, HTTPS, FTP, FTPS, SMTP and NNTP. IIS is not turned on by default when Windows is installed. The IIS Manager is accessed through the Microsoft Management Console or Administrative Tools in the Control Panel. 
(http://en.wikipedia.org/wiki/Internet_Information_Services)", "properties": { "sw_webserver_iis_version": { "title": "Version", "description": "The specific version of IIS.", "type": "string" }, "sw_webserver_iis_http_port": { "title": "HTTP Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 80 }, "sw_webserver_iis_https_port": { "title": "HTTPS Port", "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", "type": "integer", "minimum": 1, "maximum": 65535, "default": 443 } } } ] } glance-12.0.0/etc/metadefs/compute-quota.json0000664000567000056710000001605312701407047022234 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::Quota", "display_name": "Flavor Quota", "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide-cloud/compute-flavors.html", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Nova::Flavor" } ], "objects": [ { "name": "CPU Limits", "description": "You can configure the CPU limits with control parameters.", "properties": { "quota:cpu_shares": { "title": "Quota: CPU Shares", "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", "type": "integer" }, "quota:cpu_period": { "title": "Quota: CPU Period", "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. 
Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", "type": "integer", "minimum": 1000, "maximum": 1000000 }, "quota:cpu_quota": { "title": "Quota: CPU Quota", "description": "Specifies the maximum allowed bandwidth (unit: microseconds). A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", "type": "integer" } } }, { "name": "Disk QoS", "description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.", "properties": { "quota:disk_read_bytes_sec": { "title": "Quota: Disk read bytes / sec", "description": "Sets disk I/O quota for disk read bytes / sec.", "type": "integer" }, "quota:disk_read_iops_sec": { "title": "Quota: Disk read IOPS / sec", "description": "Sets disk I/O quota for disk read IOPS / sec.", "type": "integer" }, "quota:disk_write_bytes_sec": { "title": "Quota: Disk Write Bytes / sec", "description": "Sets disk I/O quota for disk write bytes / sec.", "type": "integer" }, "quota:disk_write_iops_sec": { "title": "Quota: Disk Write IOPS / sec", "description": "Sets disk I/O quota for disk write IOPS / sec.", "type": "integer" }, "quota:disk_total_bytes_sec": { "title": "Quota: Disk Total Bytes / sec", "description": "Sets disk I/O quota for total disk bytes / sec.", "type": "integer" }, "quota:disk_total_iops_sec": { "title": "Quota: Disk Total IOPS / sec", "description": "Sets disk I/O quota for disk total IOPS / sec.", "type": "integer" } } }, { "name": "Virtual Interface QoS", "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. 
Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned a managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", "properties": { "quota:vif_inbound_average": { "title": "Quota: VIF Inbound Average", "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", "type": "integer" }, "quota:vif_inbound_burst": { "title": "Quota: VIF Inbound Burst", "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "type": "integer" }, "quota:vif_inbound_peak": { "title": "Quota: VIF Inbound Peak", "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. Specifies maximum rate at which an interface can receive data.", "type": "integer" }, "quota:vif_outbound_average": { "title": "Quota: VIF Outbound Average", "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. 
Specifies average bit rate on the interface being shaped.", "type": "integer" }, "quota:vif_outbound_burst": { "title": "Quota: VIF Outbound Burst", "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", "type": "integer" }, "quota:vif_outbound_peak": { "title": "Quota: VIF Outbound Burst", "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. Specifies maximum rate at which an interface can send data.", "type": "integer" } } } ] } glance-12.0.0/etc/metadefs/compute-libvirt.json0000664000567000056710000000301212701407047022545 0ustar jenkinsjenkins00000000000000{ "namespace": "OS::Compute::Libvirt", "display_name": "libvirt Driver Options", "description": "The libvirt compute driver options. \n\nThese are properties that affect the libvirt compute driver and may be specified on flavors and images. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", "visibility": "public", "protected": true, "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_" }, { "name": "OS::Nova::Flavor", "prefix": "hw:" } ], "properties": { "serial_port_count": { "title": "Serial Port Count", "description": "Specifies the count of serial ports that should be provided. If hw:serial_port_count is not set in the flavor's extra_specs, then any count is permitted. If hw:serial_port_count is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value.", "type": "integer", "minimum": 0 }, "boot_menu": { "title": "Boot Menu", "description": "If true, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. 
This allows for flexibility in setting/overriding the default behavior as needed.", "type": "string", "enum": ["true", "false"] } }, "objects": [] } glance-12.0.0/etc/glance-api.conf0000664000567000056710000017447712701407047017634 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From glance.api # # When true, this option sets the owner of an image to be the tenant. # Otherwise, the owner of the image will be the authenticated user # issuing the request. (boolean value) #owner_is_tenant = true # Role used to identify an authenticated user as administrator. # (string value) #admin_role = admin # Allow unauthenticated users to access the API with read-only # privileges. This only applies when using ContextMiddleware. (boolean # value) #allow_anonymous_access = false # Limits request ID length. (integer value) #max_request_id_length = 64 # Public url to use for versions endpoint. The default is None, which # will use the request's host_url attribute to populate the URL base. # If Glance is operating behind a proxy, you will want to change this # to represent the proxy's URL. (string value) #public_endpoint = # Whether to allow users to specify image properties beyond what the # image schema provides (boolean value) #allow_additional_image_properties = true # Maximum number of image members per image. Negative values evaluate # to unlimited. (integer value) #image_member_quota = 128 # Maximum number of properties allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_property_quota = 128 # Maximum number of tags allowed on an image. Negative values evaluate # to unlimited. (integer value) #image_tag_quota = 128 # Maximum number of locations allowed on an image. Negative values # evaluate to unlimited. 
(integer value) #image_location_quota = 10 # Python module path of data access API (string value) #data_api = glance.db.sqlalchemy.api # Default value for the number of items returned by a request if not # specified explicitly in the request (integer value) #limit_param_default = 25 # Maximum permissible number of items that could be returned by a # request (integer value) #api_limit_max = 1000 # Whether to include the backend image storage location in image # properties. Revealing storage location can be a security risk, so # use this setting with caution! (boolean value) #show_image_direct_url = false # Whether to include the backend image locations in image properties. # For example, if using the file system store a URL of # "file:///path/to/image" will be returned to the user in the # 'direct_url' meta-data field. Revealing storage location can be a # security risk, so use this setting with caution! Setting this to # true overrides the show_image_direct_url option. (boolean value) #show_multiple_locations = false # Maximum size of image a user can upload in bytes. Defaults to # 1099511627776 bytes (1 TB).WARNING: this value should only be # increased after careful consideration and must be set to a value # under 8 EB (9223372036854775808). (integer value) # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # Set a system wide quota for every user. This value is the total # capacity that a user can use across all storage systems. A value of # 0 means unlimited.Optional unit can be specified for the value. # Accepted units are B, KB, MB, GB and TB representing Bytes, # KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no # unit is specified then Bytes is assumed. Note that there should not # be any space between value and unit and units are case sensitive. # (string value) #user_storage_quota = 0 # Deploy the v1 OpenStack Images API. (boolean value) #enable_v1_api = true # Deploy the v2 OpenStack Images API. 
(boolean value) #enable_v2_api = true # Deploy the v1 OpenStack Registry API. (boolean value) #enable_v1_registry = true # Deploy the v2 OpenStack Registry API. (boolean value) #enable_v2_registry = true # The hostname/IP of the pydev process listening for debug connections # (string value) #pydev_worker_debug_host = # The port on which a pydev process is listening for connections. # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # AES key for encrypting store 'location' metadata. This includes, if # used, Swift or S3 credentials. Should be set to a random string of # length 16, 24 or 32 bytes (string value) #metadata_encryption_key = # Digest algorithm which will be used for digital signature. Use the # command "openssl list-message-digest-algorithms" to get the # available algorithms supported by the version of OpenSSL on the # platform. Examples are "sha1", "sha256", "sha512", etc. (string # value) #digest_algorithm = sha256 # This value sets what strategy will be used to determine the image # location order. Currently two strategies are packaged with Glance # 'location_order' and 'store_type'. (string value) # Allowed values: location_order, store_type #location_strategy = location_order # The location of the property protection file.This file contains the # rules for property protections and the roles/policies associated # with it. If this config value is not specified, by default, property # protections won't be enforced. If a value is specified and the file # is not found, then the glance-api service will not start. (string # value) #property_protection_file = # This config value indicates whether "roles" or "policies" are used # in the property protection file. (string value) # Allowed values: roles, policies #property_protection_rule_format = roles # Modules of exceptions that are permitted to be recreated upon # receiving exception data from an rpc call. 
(list value) #allowed_rpc_exception_modules = glance.common.exception,builtins,exceptions # Address to bind the server. Useful when selecting a particular # network interface. (string value) #bind_host = 0.0.0.0 # The port on which the server will listen. (port value) # Minimum value: 0 # Maximum value: 65535 #bind_port = # The number of child process workers that will be created to service # requests. The default will be equal to the number of CPUs available. # (integer value) #workers = # Maximum line size of message headers to be accepted. max_header_line # may need to be increased when using large tokens (typically those # generated by the Keystone v3 API with big service catalogs (integer # value) #max_header_line = 16384 # If False, server will return the header "Connection: close", If # True, server will return "Connection: Keep-Alive" in its responses. # In order to close the client socket connection explicitly after the # response is sent and read successfully by the client, you simply # have to set this option to False when you create a wsgi server. # (boolean value) #http_keepalive = true # Timeout for client connections' socket operations. If an incoming # connection is idle for this number of seconds it will be closed. A # value of '0' means wait forever. (integer value) #client_socket_timeout = 900 # The backlog value that will be used when creating the TCP listener # socket. (integer value) #backlog = 4096 # The value for the socket option TCP_KEEPIDLE. This is the time in # seconds that the connection must be idle before TCP starts sending # keepalive probes. (integer value) #tcp_keepidle = 600 # CA certificate file to use to verify connecting clients. (string # value) #ca_file = # Certificate file to use when starting API server securely. (string # value) #cert_file = # Private key file to use when starting API server securely. (string # value) #key_file = # The path to the sqlite file database that will be used for image # cache management. 
(string value) #image_cache_sqlite_db = cache.db # The driver to use for image cache management. (string value) #image_cache_driver = sqlite # The upper limit (the maximum size of accumulated cache in bytes) # beyond which the cache pruner, if running, starts cleaning the image # cache. (integer value) #image_cache_max_size = 10737418240 # The amount of time to let an incomplete image remain in the cache, # before the cache cleaner, if running, will remove the incomplete # image. (integer value) #image_cache_stall_time = 86400 # Base directory that the image cache uses. (string value) #image_cache_dir = # Default publisher_id for outgoing notifications. (string value) #default_publisher_id = image.localhost # List of disabled notifications. A notification can be given either # as a notification type to disable a single event, or as a # notification group prefix to disable all events within a group. # Example: if this config option is set to ["image.create", # "metadef_namespace"], then "image.create" notification will not be # sent after image is created and none of the notifications for # metadefinition namespaces will be sent. (list value) #disabled_notifications = # Address to find the registry server. (string value) #registry_host = 0.0.0.0 # Port the registry server is listening on. (port value) # Minimum value: 0 # Maximum value: 65535 #registry_port = 9191 # Whether to pass through the user token when making requests to the # registry. To prevent failures with token expiration during big files # upload, it is recommended to set this parameter to False.If # "use_user_token" is not in effect, then admin credentials can be # specified. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. 
Related functionality with uploading big images has # been implemented with Keystone trusts support. #use_user_token = true # The administrators user name. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_user = # The administrators password. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_password = # The tenant name of the administrative user. If "use_user_token" is # not in effect, then admin tenant name can be specified. (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_tenant_name = # The URL to the keystone service. If "use_user_token" is not in # effect and using keystone auth, then URL of keystone can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. 
For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_url = # The strategy to use for authentication. If "use_user_token" is not # in effect, then auth strategy can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_strategy = noauth # The region for the authentication service. If "use_user_token" is # not in effect and using keystone auth, then region name can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_region = # The protocol to use for communication with the registry server. # Either http or https. (string value) #registry_client_protocol = http # The path to the key file to use in SSL connections to the registry # server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE # environment variable to a filepath of the key file (string value) #registry_client_key_file = # The path to the cert file to use in SSL connections to the registry # server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE # environment variable to a filepath of the CA cert file (string # value) #registry_client_cert_file = # The path to the certifying authority cert file to use in SSL # connections to the registry server, if any. 
Alternately, you may set # the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the # CA cert file. (string value) #registry_client_ca_file = # When using SSL in connections to the registry server, do not require # validation via a certifying authority. This is the registry's # equivalent of specifying --insecure on the command line using # glanceclient for the API. (boolean value) #registry_client_insecure = false # The period of time, in seconds, that the API server will wait for a # registry request to complete. A value of 0 implies no timeout. # (integer value) #registry_client_timeout = 600 # Whether to pass through headers containing user and tenant # information when making requests to the registry. This allows the # registry to use the context middleware without keystonemiddleware's # auth_token middleware, removing calls to the keystone auth service. # It is recommended that when using this option, secure communication # between glance api and glance registry is ensured by means other # than auth_token middleware. (boolean value) #send_identity_headers = false # The amount of time in seconds to delay before performing a delete. # (integer value) #scrub_time = 0 # The size of thread pool to be used for scrubbing images. The default # is one, which signifies serial scrubbing. Any value above one # indicates the max number of images that may be scrubbed in parallel. # (integer value) #scrub_pool_size = 1 # Turn on/off delayed delete. (boolean value) #delayed_delete = false # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. 
For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. # (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if # log_config_append is set. (boolean value) #use_stderr = true # Format string to use for log messages with context. 
(string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. # (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. # (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of RPC connection pool. 
(integer value) # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size #rpc_conn_pool_size = 30 # ZeroMQ bind address. Should be a wildcard (*), an ethernet # interface, or IP. The "host" option should point or resolve to this # address. (string value) #rpc_zmq_bind_address = * # MatchMaker driver. (string value) # Allowed values: redis, dummy #rpc_zmq_matchmaker = redis # Type of concurrency used. Either "native" or "eventlet" (string # value) #rpc_zmq_concurrency = eventlet # Number of ZeroMQ contexts, defaults to 1. (integer value) #rpc_zmq_contexts = 1 # Maximum number of ingress messages to locally buffer per topic. # Default is unlimited. (integer value) #rpc_zmq_topic_backlog = # Directory for holding IPC sockets. (string value) #rpc_zmq_ipc_dir = /var/run/openstack # Name of this node. Must be a valid hostname, FQDN, or IP address. # Must match "host" option, if running Nova. (string value) #rpc_zmq_host = localhost # Seconds to wait before a cast expires (TTL). The default value of -1 # specifies an infinite linger period. The value of 0 specifies no # linger period. Pending messages shall be discarded immediately when # the socket is closed. Only supported by impl_zmq. (integer value) #rpc_cast_timeout = -1 # The default number of seconds that poll should wait. Poll raises # timeout exception when timeout expired. (integer value) #rpc_poll_timeout = 1 # Expiration timeout in seconds of a name service record about # existing target ( < 0 means no timeout). (integer value) #zmq_target_expire = 120 # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. # (boolean value) #use_pub_sub = true # Minimal port number for random ports range. (port value) # Minimum value: 0 # Maximum value: 65535 #rpc_zmq_min_port = 49152 # Maximal port number for random ports range. (integer value) # Minimum value: 1 # Maximum value: 65536 #rpc_zmq_max_port = 65536 # Number of retries to find free port number before fail with # ZMQBindError. 
(integer value) #rpc_zmq_bind_port_retries = 100 # Size of executor thread pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # A URL representing the messaging driver to use and its full # configuration. If not set, we fall back to the rpc_backend option # and driver specific configuration. (string value) #transport_url = # The messaging driver to use, defaults to rabbit. Other drivers # include amqp and zmq. (string value) #rpc_backend = rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the transport_url # option. (string value) #control_exchange = openstack [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual # request. (list value) #allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID [cors.subdomain] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. 
(list value) #allowed_origin = # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual # request. (list value) #allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. # (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. To use whatever SQL # mode is set by the server configuration, set this to no value. # Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. 
(integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. # (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection # lost. (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. 
(integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. # (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false [glance_store] # # From glance.store # # List of stores enabled. Valid stores are: cinder, file, http, rbd, # sheepdog, swift, s3, vsphere (list value) #stores = file,http # Default scheme to use to store image data. The scheme must be # registered by one of the stores defined by the 'stores' config # option. (string value) #default_store = file # Minimum interval seconds to execute updating dynamic storage # capabilities based on backend status then. It's not a periodic # routine, the update logic will be executed only when interval # seconds elapsed and an operation of store has triggered. The feature # will be enabled only when the option value greater than zero. # (integer value) #store_capabilities_update_min_interval = 0 # Specify the path to the CA bundle file to use in verifying the # remote server certificate. (string value) #https_ca_certificates_file = # If true, the remote server certificate is not verified. If false, # then the default CA truststore is used for verification. This option # is ignored if "https_ca_certificates_file" is set. (boolean value) #https_insecure = true # Specify the http/https proxy information that should be used to # connect to the remote server. The proxy information should be a key # value pair of the scheme and proxy. e.g.
http:10.0.0.1:3128. You can # specify proxies for multiple schemes by separating the key value # pairs with a comma, e.g. http:10.0.0.1:3128, https:10.0.0.1:1080. # (dict value) #http_proxy_information = # If True, swiftclient won't check for a valid SSL certificate when # authenticating. (boolean value) #swift_store_auth_insecure = false # A string giving the CA certificate file to use in SSL connections # for verifying certs. (string value) #swift_store_cacert = # The region of the swift endpoint to be used for single tenant. This # setting is only necessary if the tenant has multiple swift # endpoints. (string value) #swift_store_region = # If set, the configured endpoint will be used. If None, the storage # url from the auth response will be used. (string value) #swift_store_endpoint = # A string giving the endpoint type of the swift service to use # (publicURL, adminURL or internalURL). This setting is only used if # swift_store_auth_version is 2. (string value) #swift_store_endpoint_type = publicURL # A string giving the service type of the swift service to use. This # setting is only used if swift_store_auth_version is 2. (string # value) #swift_store_service_type = object-store # Container within the account that the account should use for storing # images in Swift when using single container mode. In multiple # container mode, this will be the prefix for all containers. (string # value) #swift_store_container = glance # The size, in MB, that Glance will start chunking image files and do # a large object manifest in Swift. (integer value) #swift_store_large_object_size = 5120 # The amount of data written to a temporary disk buffer during the # process of chunking the image file. (integer value) #swift_store_large_object_chunk_size = 200 # A boolean value that determines if we create the container if it # does not exist.
(boolean value) #swift_store_create_container_on_put = false # If set to True, enables multi-tenant storage mode which causes # Glance images to be stored in tenant specific Swift accounts. # (boolean value) #swift_store_multi_tenant = false # When set to 0, a single-tenant store will only use one container to # store all images. When set to an integer value between 1 and 32, a # single-tenant store will use multiple containers to store images, # and this value will determine how many containers are created. Used # only when swift_store_multi_tenant is disabled. The total number of # containers that will be used is equal to 16^N, so if this config # option is set to 2, then 16^2=256 containers will be used to store # images. (integer value) #swift_store_multiple_containers_seed = 0 # A list of tenants that will be granted read/write access on all # Swift containers created by Glance in multi-tenant mode. (list # value) #swift_store_admin_tenants = # If set to False, disables SSL layer compression of https swift # requests. Setting to False may improve performance for images which # are already in a compressed format, eg qcow2. (boolean value) #swift_store_ssl_compression = true # The number of times a Swift download will be retried before the # request fails. (integer value) #swift_store_retry_get_count = 0 # The period of time (in seconds) before token expiration when # glance_store will try to request a new user token. Default value 60 sec # means that if token is going to expire in 1 min then glance_store # requests a new user token. (integer value) #swift_store_expire_soon_interval = 60 # If set to True create a trust for each add/get request to Multi- # tenant store in order to prevent authentication token to be expired # during uploading/downloading data. If set to False then user token # is used for Swift connection (so no overhead on trust creation).
# Please note that this option is considered if and only if # swift_store_multi_tenant=True (boolean value) #swift_store_use_trusts = true # The reference to the default swift account/backing store parameters # to use for adding new images. (string value) #default_swift_reference = ref1 # Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. # (deprecated - use "auth_version" in swift_store_config_file) (string # value) #swift_store_auth_version = 2 # The address where the Swift authentication service is listening. # (deprecated - use "auth_address" in swift_store_config_file) (string # value) #swift_store_auth_address = # The user to authenticate against the Swift authentication service # (deprecated - use "user" in swift_store_config_file) (string value) #swift_store_user = # Auth key for the user authenticating against the Swift # authentication service. (deprecated - use "key" in # swift_store_config_file) (string value) #swift_store_key = # The config file that has the swift account(s) configs. (string value) #swift_store_config_file = # RADOS images will be chunked into objects of this size (in # megabytes). For best performance, this should be a power of two. # (integer value) #rbd_store_chunk_size = 8 # RADOS pool in which images are stored. (string value) #rbd_store_pool = images # RADOS user to authenticate as (only applicable if using Cephx. If # <None>, a default will be chosen based on the client. section in # rbd_store_ceph_conf) (string value) #rbd_store_user = # Ceph configuration file path. If <None>, librados will locate the # default config. If using cephx authentication, this file should # include a reference to the right keyring in a client. section # (string value) #rbd_store_ceph_conf = /etc/ceph/ceph.conf # Timeout value (in seconds) used when connecting to ceph cluster. If # value <= 0, no timeout is set and default librados value is used.
# (integer value) #rados_connect_timeout = 0 # Info to match when looking for cinder in the service catalog. Format # is : separated values of the form: # <service_type>:<service_name>:<endpoint_type> (string value) #cinder_catalog_info = volumev2::publicURL # Override service catalog lookup with template for cinder endpoint # e.g. http://localhost:8776/v2/%(tenant)s (string value) #cinder_endpoint_template = # Region name of this node. If specified, it will be used to locate # OpenStack services for stores. (string value) # Deprecated group/name - [DEFAULT]/os_region_name #cinder_os_region_name = # Location of CA certificates file to use for cinder client requests. # (string value) #cinder_ca_certificates_file = # Number of cinderclient retries on failed http calls (integer value) #cinder_http_retries = 3 # Time period in seconds to wait for a cinder volume # transition to complete. (integer value) #cinder_state_transition_timeout = 300 # Allow to perform insecure SSL requests to cinder (boolean value) #cinder_api_insecure = false # The address where the Cinder authentication service is listening. If # <None>, the cinder endpoint in the service catalog is used. (string # value) #cinder_store_auth_address = # User name to authenticate against Cinder. If <None>, the user of # current context is used. (string value) #cinder_store_user_name = # Password for the user authenticating against Cinder. If <None>, the # current context auth token is used. (string value) #cinder_store_password = # Project name where the image is stored in Cinder. If <None>, the # project in current context is used. (string value) #cinder_store_project_name = # Path to the rootwrap configuration file to use for running commands # as root. (string value) #rootwrap_config = /etc/glance/rootwrap.conf # The host where the S3 server is listening. (string value) #s3_store_host = # The S3 query token access key. (string value) #s3_store_access_key = # The S3 query token secret key.
(string value) #s3_store_secret_key = # The S3 bucket to be used to store the Glance data. (string value) #s3_store_bucket = # The local directory where uploads will be staged before they are # transferred into S3. (string value) #s3_store_object_buffer_dir = # A boolean to determine if the S3 bucket should be created on upload # if it does not exist or if an error should be returned to the user. # (boolean value) #s3_store_create_bucket_on_put = false # The S3 calling format used to determine the bucket. Either subdomain # or path can be used. (string value) #s3_store_bucket_url_format = subdomain # What size, in MB, should S3 start chunking image files and do a # multipart upload in S3. (integer value) #s3_store_large_object_size = 100 # What multipart upload part size, in MB, should S3 use when uploading # parts. The size must be greater than or equal to 5M. (integer value) #s3_store_large_object_chunk_size = 10 # The number of thread pools to perform a multipart upload in S3. # (integer value) #s3_store_thread_pools = 10 # Enable the use of a proxy. (boolean value) #s3_store_enable_proxy = false # Address or hostname for the proxy server. (string value) #s3_store_proxy_host = # The port to use when connecting over a proxy. (integer value) #s3_store_proxy_port = 8080 # The username to connect to the proxy. (string value) #s3_store_proxy_user = # The password to use when connecting over a proxy. (string value) #s3_store_proxy_password = # Images will be chunked into objects of this size (in megabytes). For # best performance, this should be a power of two. (integer value) #sheepdog_store_chunk_size = 64 # Port of sheep daemon. (integer value) #sheepdog_store_port = 7000 # IP address of sheep daemon. (string value) #sheepdog_store_address = localhost # Directory to which the Filesystem backend store writes images. 
# (string value) #filesystem_store_datadir = /var/lib/glance/images # List of directories and their priorities to which the Filesystem # backend store writes images. (multi valued) #filesystem_store_datadirs = # The path to a file which contains the metadata to be returned with # any location associated with this store. The file must contain a # valid JSON object. The object should contain the keys 'id' and # 'mountpoint'. The value for both keys should be 'string'. (string # value) #filesystem_store_metadata_file = # The required permission for created image file. In this way the user # other service used, e.g. Nova, who consumes the image could be the # exclusive member of the group that owns the files created. Assigning # it less than or equal to zero means don't change the default # permission of the file. This value will be decoded as an octal # digit. (integer value) #filesystem_store_file_perm = 0 # ESX/ESXi or vCenter Server target system. The server value can be an # IP address or a DNS name. (string value) #vmware_server_host = # Username for authenticating with VMware ESX/VC server. (string # value) #vmware_server_username = # Password for authenticating with VMware ESX/VC server. (string # value) #vmware_server_password = # Number of times VMware ESX/VC server API must be retried upon # connection related issues. (integer value) #vmware_api_retry_count = 10 # The interval used for polling remote tasks invoked on VMware ESX/VC # server. (integer value) #vmware_task_poll_interval = 5 # The name of the directory where the glance images will be stored in # the VMware datastore. (string value) #vmware_store_image_dir = /openstack_glance # If true, the ESX/vCenter server certificate is not verified. If # false, then the default CA truststore is used for verification. This # option is ignored if "vmware_ca_file" is set.
(boolean value) # Deprecated group/name - [DEFAULT]/vmware_api_insecure #vmware_insecure = false # Specify a CA bundle file to use in verifying the ESX/vCenter server # certificate. (string value) #vmware_ca_file = # A list of datastores where the image can be stored. This option may # be specified multiple times for specifying multiple datastores. The # datastore name should be specified after its datacenter path, # separated by ":". An optional weight may be given after the # datastore name, separated again by ":". Thus, the required format # becomes <datacenter_path>:<datastore_name>:<optional_weight>. When # adding an image, the datastore with highest weight will be selected, # unless there is not enough free space available in cases where the # image size is already known. If no weight is given, it is assumed to # be zero and the directory will be considered for selection last. If # multiple datastores have the same weight, then the one with the most # free space available is selected. (multi valued) #vmware_datastores = [image_format] # # From glance.api # # Supported values for the 'container_format' image attribute (list # value) # Deprecated group/name - [DEFAULT]/container_formats #container_formats = ami,ari,aki,bare,ovf,ova,docker # Supported values for the 'disk_format' image attribute (list value) # Deprecated group/name - [DEFAULT]/disk_formats #disk_formats = ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete public Identity API endpoint. (string value) #auth_uri = # API version of the admin Identity API endpoint. (string value) #auth_version = # Do not handle authorization requests within the middleware, but # delegate the authorization decision to downstream WSGI components. # (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server. # (integer value) #http_connect_timeout = # How many times are we trying to reconnect when communicating with # Identity API Server.
(integer value) #http_request_max_retries = 3 # Env key for the swift cache. (string value) #cache = # Required if identity server requires client certificate (string # value) #certfile = # Required if identity server requires client certificate (string # value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs # connections. Defaults to system CAs. (string value) #cafile = # Verify HTTPS connections. (boolean value) #insecure = false # The region in which the identity server can be found. (string value) #region_name = # Directory used to cache files related to PKI tokens. (string value) #signing_dir = # Optionally specify a list of memcached server(s) to use for caching. # If left undefined, tokens will instead be cached in-process. (list # value) # Deprecated group/name - [DEFAULT]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the # middleware caches previously-seen tokens for a configurable duration # (in seconds). Set to -1 to disable caching completely. (integer # value) #token_cache_time = 300 # Determines the frequency at which the list of revoked tokens is # retrieved from the Identity service (in seconds). A high number of # revocation events combined with a low cache duration may # significantly reduce performance. (integer value) #revocation_cache_time = 10 # (Optional) If defined, indicate whether token data should be # authenticated or authenticated and encrypted. If MAC, token data is # authenticated (with HMAC) in the cache. If ENCRYPT, token data is # encrypted and authenticated in the cache. If the value is not one of # these options or empty, auth_token will raise an exception on # initialization. (string value) # Allowed values: None, MAC, ENCRYPT #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This # string is used for key derivation. 
(string value) #memcache_secret_key = # (Optional) Number of seconds memcached server is considered dead # before it is tried again. (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every # memcached server. (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a # memcached server. (integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held # unused in the pool before it is closed. (integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a # memcached client connection from the pool. (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool. # The advanced pool will only work under python 2.x. (boolean value) #memcache_use_advanced_pool = false # (Optional) Indicate whether to set the X-Service-Catalog header. If # False, middleware will not ask for service catalog on token # validation and will not set the X-Service-Catalog header. (boolean # value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: # "disabled" to not check token binding. "permissive" (default) to # validate binding information if the bind type is of a form known to # the server and ignore it if not. "strict" like "permissive" but if # the bind type is unknown the token will be rejected. "required" any # form of token binding is needed to be allowed. Finally the name of a # binding method that must be present in tokens. (string value) #enforce_token_bind = permissive # If true, the revocation list will be checked for cached tokens. This # requires that PKI tokens are configured on the identity server. # (boolean value) #check_revocations_for_cached = false # Hash algorithms to use for hashing PKI tokens. This may be a single # algorithm or multiple. 
The algorithms are those supported by Python # standard hashlib.new(). The hashes will be tried in the order given, # so put the preferred one first for performance. The result of the # first hash will be stored in the cache. This will typically be set # to multiple values only while migrating from a less secure algorithm # to a more secure one. Once all the old tokens are expired this # option should be set to a single value for better performance. (list # value) #hash_algorithms = md5 # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (unknown # value) #auth_section = [matchmaker_redis] # # From oslo.messaging # # Host to locate redis. (string value) #host = 127.0.0.1 # Use this port to connect to redis host. (port value) # Minimum value: 0 # Maximum value: 65535 #port = 6379 # Password for Redis server (optional). (string value) #password = # List of Redis Sentinel hosts (fault tolerance mode) e.g. # [host:port, host1:port ... ] (list value) #sentinel_hosts = # Redis replica set name. (string value) #sentinel_group_name = oslo-messaging-zeromq # Time in ms to wait between connection attempts. (integer value) #wait_timeout = 500 # Time in ms to wait before the transaction is killed. (integer value) #check_timeout = 20000 # Timeout in ms on blocking socket operations (integer value) #socket_timeout = 1000 [oslo_concurrency] # # From oslo.concurrency # # Enables or disables inter-process locks. (boolean value) # Deprecated group/name - [DEFAULT]/disable_process_locking #disable_process_locking = false # Directory to use for lock files. For security, the specified # directory should only be writable by the user running the processes # that need locking. Defaults to environment variable OSLO_LOCK_PATH. # If external locks are used, a lock path must be set. 
(string value) # Deprecated group/name - [DEFAULT]/lock_path #lock_path = [oslo_messaging_amqp] # # From oslo.messaging # # address prefix used when sending to a specific server (string value) # Deprecated group/name - [amqp1]/server_request_prefix #server_request_prefix = exclusive # address prefix used when broadcasting to all servers (string value) # Deprecated group/name - [amqp1]/broadcast_prefix #broadcast_prefix = broadcast # address prefix when sending to any server in group (string value) # Deprecated group/name - [amqp1]/group_request_prefix #group_request_prefix = unicast # Name for the AMQP container (string value) # Deprecated group/name - [amqp1]/container_name #container_name = # Timeout for inactive connections (in seconds) (integer value) # Deprecated group/name - [amqp1]/idle_timeout #idle_timeout = 0 # Debug: dump AMQP frames to stdout (boolean value) # Deprecated group/name - [amqp1]/trace #trace = false # CA certificate PEM file to verify server certificate (string value) # Deprecated group/name - [amqp1]/ssl_ca_file #ssl_ca_file = # Identifying certificate PEM file to present to clients (string # value) # Deprecated group/name - [amqp1]/ssl_cert_file #ssl_cert_file = # Private key PEM file used to sign cert_file certificate (string # value) # Deprecated group/name - [amqp1]/ssl_key_file #ssl_key_file = # Password for decrypting ssl_key_file (if encrypted) (string value) # Deprecated group/name - [amqp1]/ssl_key_password #ssl_key_password = # Accept clients using either SSL or plain TCP (boolean value) # Deprecated group/name - [amqp1]/allow_insecure_clients #allow_insecure_clients = false # Space separated list of acceptable SASL mechanisms (string value) # Deprecated group/name - [amqp1]/sasl_mechanisms #sasl_mechanisms = # Path to directory that contains the SASL configuration (string # value) # Deprecated group/name - [amqp1]/sasl_config_dir #sasl_config_dir = # Name of configuration file (without .conf suffix) (string value) # Deprecated 
group/name - [amqp1]/sasl_config_name #sasl_config_name = # User name for message broker authentication (string value) # Deprecated group/name - [amqp1]/username #username = # Password for message broker authentication (string value) # Deprecated group/name - [amqp1]/password #password = [oslo_messaging_notifications] # # From oslo.messaging # # The Driver(s) to handle sending notifications. Possible values are # messaging, messagingv2, routing, log, test, noop (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for notifications. If # not set, we fall back to the same configuration used for RPC. # (string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = # AMQP topic used for OpenStack notifications. (list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_durable_queues # Deprecated group/name - [DEFAULT]/rabbit_durable_queues #amqp_durable_queues = false # Auto-delete queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_auto_delete #amqp_auto_delete = false # SSL version to use (valid only if SSL enabled). Valid values are # TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be # available on some distributions. (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_version #kombu_ssl_version = # SSL key file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile #kombu_ssl_keyfile = # SSL cert file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile #kombu_ssl_certfile = # SSL certification authority file (valid only if SSL enabled). 
# (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs #kombu_ssl_ca_certs = # How long to wait before reconnecting in response to an AMQP consumer # cancel notification. (floating point value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay #kombu_reconnect_delay = 1.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression # will not be used. This option may not be available in future # versions. (string value) #kombu_compression = # How long to wait for a missing client before abandoning to send it its # replies. This value should not be longer than rpc_response_timeout. # (integer value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we # are currently connected to becomes unavailable. Takes effect only if # more than one RabbitMQ node is provided in config. (string value) # Allowed values: round-robin, shuffle #kombu_failover_strategy = round-robin # The RabbitMQ broker address where a single node is used. (string # value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host = localhost # The RabbitMQ broker port where a single node is used. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port = 5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts = $rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl = false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid = guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password #rabbit_password = guest # The RabbitMQ login method. 
(string value) # Deprecated group/name - [DEFAULT]/rabbit_login_method #rabbit_login_method = AMQPLAIN # The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host = / # How frequently to retry connecting with RabbitMQ. (integer value) #rabbit_retry_interval = 1 # How long to backoff for between retries when connecting to RabbitMQ. # (integer value) # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries. Default is 30 # seconds. (integer value) #rabbit_interval_max = 30 # Maximum number of RabbitMQ connection retries. Default is 0 # (infinite retry count). (integer value) # Deprecated group/name - [DEFAULT]/rabbit_max_retries #rabbit_max_retries = 0 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change # this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, # queue mirroring is no longer controlled by the x-ha-policy argument # when declaring a queue. If you just want to make sure that all # queues (except those with auto-generated names) are mirrored across # all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha- # mode": "all"}' " (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_ha_queues #rabbit_ha_queues = false # Positive integer representing duration in seconds for queue TTL # (x-expires). Queues which are unused for the duration of the TTL are # automatically deleted. The parameter affects only reply and fanout # queues. (integer value) # Minimum value: 1 #rabbit_transient_queues_ttl = 600 # Specifies the number of messages to prefetch. Setting to zero allows # unlimited messages. (integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down # if heartbeat's keep-alive fails (0 disable the heartbeat). 
# EXPERIMENTAL (integer value) #heartbeat_timeout_threshold = 60 # How often during the heartbeat_timeout_threshold we check the # heartbeat. (integer value) #heartbeat_rate = 2 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake # (boolean value) # Deprecated group/name - [DEFAULT]/fake_rabbit #fake_rabbit = false # Maximum number of channels to allow (integer value) #channel_max = # The maximum byte size for an AMQP frame (integer value) #frame_max = # How often to send heartbeats for consumer's connections (integer # value) #heartbeat_interval = 1 # Enable SSL (boolean value) #ssl = # Arguments passed to ssl.wrap_socket (dict value) #ssl_options = # Set socket timeout in seconds for connection's socket (floating # point value) #socket_timeout = 0.25 # Set TCP_USER_TIMEOUT in seconds for connection's socket (floating # point value) #tcp_user_timeout = 0.25 # Set delay for reconnection to some host which has connection error # (floating point value) #host_connection_reconnect_delay = 0.25 # Maximum number of connections to keep queued. (integer value) #pool_max_size = 10 # Maximum number of connections to create above `pool_max_size`. # (integer value) #pool_max_overflow = 0 # Default number of seconds to wait for a connection to become available # (integer value) #pool_timeout = 30 # Lifetime of a connection (since creation) in seconds or None for no # recycling. Expired connections are closed on acquire. (integer # value) #pool_recycle = 600 # Threshold at which inactive (since release) connections are # considered stale in seconds or None for no staleness. Stale # connections are closed on acquire. (integer value) #pool_stale = 60 # Persist notification messages. (boolean value) #notification_persistence = false # Exchange name for sending notifications (string value) #default_notification_exchange = ${control_exchange}_notification # Max number of not acknowledged message which RabbitMQ can send to # notification listener. 
(integer value) #notification_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending notification, -1 means infinite retry. (integer value) #default_notification_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending notification message (floating point value) #notification_retry_delay = 0.25 # Time to live for rpc queues without consumers in seconds. (integer # value) #rpc_queue_expiration = 60 # Exchange name for sending RPC messages (string value) #default_rpc_exchange = ${control_exchange}_rpc # Exchange name for receiving RPC replies (string value) #rpc_reply_exchange = ${control_exchange}_rpc_reply # Max number of not acknowledged message which RabbitMQ can send to # rpc listener. (integer value) #rpc_listener_prefetch_count = 100 # Max number of not acknowledged message which RabbitMQ can send to # rpc reply listener. (integer value) #rpc_reply_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending reply. -1 means infinite retry during rpc_timeout (integer # value) #rpc_reply_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending reply. (floating point value) #rpc_reply_retry_delay = 0.25 # Reconnecting retry count in case of connectivity problem during # sending RPC message, -1 means infinite retry. If actual retry # attempts is not 0 the rpc request could be processed more than one # time (integer value) #default_rpc_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending RPC message (floating point value) #rpc_retry_delay = 0.25 [oslo_policy] # # From oslo.policy # # The JSON file that defines policies. (string value) # Deprecated group/name - [DEFAULT]/policy_file #policy_file = policy.json # Default rule. Enforced when a requested rule is not found. 
(string # value) # Deprecated group/name - [DEFAULT]/policy_default_rule #policy_default_rule = default # Directories where policy configuration files are stored. They can be # relative to any directory in the search path defined by the # config_dir option, or absolute paths. The file defined by # policy_file must exist for these directories to be searched. # Missing or empty directories are ignored. (multi valued) # Deprecated group/name - [DEFAULT]/policy_dirs #policy_dirs = policy.d [paste_deploy] # # From glance.api # # Partial name of a pipeline in your paste configuration file with the # service name removed. For example, if your paste section name is # [pipeline:glance-api-keystone] use the value "keystone" (string # value) #flavor = # Name of the paste configuration file. (string value) #config_file = [profiler] # # From glance.api # # If False fully disable profiling feature. (boolean value) #enabled = false # If False doesn't trace SQL requests. (boolean value) #trace_sqlalchemy = false # Secret key to use to sign Glance API and Glance Registry services # tracing messages. (string value) #hmac_keys = SECRET_KEY [store_type_location_strategy] # # From glance.api # # The store names to use to get store preference order. The name must # be registered by one of the stores defined by the 'stores' config # option. This option will be applied when you using 'store_type' # option as image location strategy defined by the 'location_strategy' # config option. (list value) #store_type_preference = [task] # # From glance.api # # Time in hours for which a task lives after, either succeeding or # failing (integer value) # Deprecated group/name - [DEFAULT]/task_time_to_live #task_time_to_live = 48 # Specifies which task executor to be used to run the task scripts. # (string value) #task_executor = taskflow # Work dir for asynchronous task operations. 
The directory set here # will be used to operate over images - normally before they are # imported in the destination store. When providing work dir, make # sure enough space is provided for concurrent tasks to run # efficiently without running out of space. A rough estimation can be # done by multiplying the number of `max_workers` - or the N of # workers running - by an average image size (e.g 500MB). The image # size estimation should be done based on the average size in your # deployment. Note that depending on the tasks running you may need to # multiply this number by some factor depending on what the task does. # For example, you may want to double the available size if image # conversion is enabled. All this being said, remember these are just # estimations and you should do them based on the worst case scenario # and be prepared to act in case they were wrong. (string value) #work_dir = [taskflow_executor] # # From glance.api # # The mode in which the engine will run. Can be 'serial' or # 'parallel'. (string value) # Allowed values: serial, parallel #engine_mode = parallel # The number of parallel activities executed at the same time by the # engine. The value can be greater than one when the engine mode is # 'parallel'. (integer value) # Deprecated group/name - [task]/eventlet_executor_pool_size #max_workers = 10 glance-12.0.0/etc/glance-registry.conf0000664000567000056710000014137312701407047020720 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From glance.registry # # When true, this option sets the owner of an image to be the tenant. # Otherwise, the owner of the image will be the authenticated user # issuing the request. (boolean value) #owner_is_tenant = true # Role used to identify an authenticated user as administrator. # (string value) #admin_role = admin # Allow unauthenticated users to access the API with read-only # privileges. This only applies when using ContextMiddleware. 
(boolean # value) #allow_anonymous_access = false # Limits request ID length. (integer value) #max_request_id_length = 64 # Whether to allow users to specify image properties beyond what the # image schema provides (boolean value) #allow_additional_image_properties = true # Maximum number of image members per image. Negative values evaluate # to unlimited. (integer value) #image_member_quota = 128 # Maximum number of properties allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_property_quota = 128 # Maximum number of tags allowed on an image. Negative values evaluate # to unlimited. (integer value) #image_tag_quota = 128 # Maximum number of locations allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_location_quota = 10 # Python module path of data access API (string value) #data_api = glance.db.sqlalchemy.api # Default value for the number of items returned by a request if not # specified explicitly in the request (integer value) #limit_param_default = 25 # Maximum permissible number of items that could be returned by a # request (integer value) #api_limit_max = 1000 # Whether to include the backend image storage location in image # properties. Revealing storage location can be a security risk, so # use this setting with caution! (boolean value) #show_image_direct_url = false # Whether to include the backend image locations in image properties. # For example, if using the file system store a URL of # "file:///path/to/image" will be returned to the user in the # 'direct_url' meta-data field. Revealing storage location can be a # security risk, so use this setting with caution! Setting this to # true overrides the show_image_direct_url option. (boolean value) #show_multiple_locations = false # Maximum size of image a user can upload in bytes. 
Defaults to # 1099511627776 bytes (1 TB). WARNING: this value should only be # increased after careful consideration and must be set to a value # under 8 EB (9223372036854775808). (integer value) # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # Set a system wide quota for every user. This value is the total # capacity that a user can use across all storage systems. A value of # 0 means unlimited. Optional unit can be specified for the value. # Accepted units are B, KB, MB, GB and TB representing Bytes, # KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no # unit is specified then Bytes is assumed. Note that there should not # be any space between value and unit and units are case sensitive. # (string value) #user_storage_quota = 0 # Deploy the v1 OpenStack Images API. (boolean value) #enable_v1_api = true # Deploy the v2 OpenStack Images API. (boolean value) #enable_v2_api = true # Deploy the v1 OpenStack Registry API. (boolean value) #enable_v1_registry = true # Deploy the v2 OpenStack Registry API. (boolean value) #enable_v2_registry = true # The hostname/IP of the pydev process listening for debug connections # (string value) #pydev_worker_debug_host = # The port on which a pydev process is listening for connections. # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # AES key for encrypting store 'location' metadata. This includes, if # used, Swift or S3 credentials. Should be set to a random string of # length 16, 24 or 32 bytes (string value) #metadata_encryption_key = # Digest algorithm which will be used for digital signature. Use the # command "openssl list-message-digest-algorithms" to get the # available algorithms supported by the version of OpenSSL on the # platform. Examples are "sha1", "sha256", "sha512", etc. (string # value) #digest_algorithm = sha256 # Address to bind the server. Useful when selecting a particular # network interface. 
(string value) #bind_host = 0.0.0.0 # The port on which the server will listen. (port value) # Minimum value: 0 # Maximum value: 65535 #bind_port = # The backlog value that will be used when creating the TCP listener # socket. (integer value) #backlog = 4096 # The value for the socket option TCP_KEEPIDLE. This is the time in # seconds that the connection must be idle before TCP starts sending # keepalive probes. (integer value) #tcp_keepidle = 600 # CA certificate file to use to verify connecting clients. (string # value) #ca_file = # Certificate file to use when starting API server securely. (string # value) #cert_file = # Private key file to use when starting API server securely. (string # value) #key_file = # The number of child process workers that will be created to service # requests. The default will be equal to the number of CPUs available. # (integer value) #workers = # Maximum line size of message headers to be accepted. max_header_line # may need to be increased when using large tokens (typically those # generated by the Keystone v3 API with big service catalogs (integer # value) #max_header_line = 16384 # If False, server will return the header "Connection: close", If # True, server will return "Connection: Keep-Alive" in its responses. # In order to close the client socket connection explicitly after the # response is sent and read successfully by the client, you simply # have to set this option to False when you create a wsgi server. # (boolean value) #http_keepalive = true # Timeout for client connections' socket operations. If an incoming # connection is idle for this number of seconds it will be closed. A # value of '0' means wait forever. (integer value) #client_socket_timeout = 900 # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. 
(boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. # (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. 
This option is ignored if # log_config_append is set. (boolean value) #use_stderr = true # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. # (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. 
# (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false # # From oslo.messaging # # Size of RPC connection pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_conn_pool_size #rpc_conn_pool_size = 30 # ZeroMQ bind address. Should be a wildcard (*), an ethernet # interface, or IP. The "host" option should point or resolve to this # address. (string value) #rpc_zmq_bind_address = * # MatchMaker driver. (string value) # Allowed values: redis, dummy #rpc_zmq_matchmaker = redis # Type of concurrency used. Either "native" or "eventlet" (string # value) #rpc_zmq_concurrency = eventlet # Number of ZeroMQ contexts, defaults to 1. (integer value) #rpc_zmq_contexts = 1 # Maximum number of ingress messages to locally buffer per topic. # Default is unlimited. (integer value) #rpc_zmq_topic_backlog = # Directory for holding IPC sockets. (string value) #rpc_zmq_ipc_dir = /var/run/openstack # Name of this node. Must be a valid hostname, FQDN, or IP address. # Must match "host" option, if running Nova. (string value) #rpc_zmq_host = localhost # Seconds to wait before a cast expires (TTL). The default value of -1 # specifies an infinite linger period. The value of 0 specifies no # linger period. Pending messages shall be discarded immediately when # the socket is closed. Only supported by impl_zmq. (integer value) #rpc_cast_timeout = -1 # The default number of seconds that poll should wait. Poll raises # timeout exception when timeout expired. (integer value) #rpc_poll_timeout = 1 # Expiration timeout in seconds of a name service record about # existing target ( < 0 means no timeout). (integer value) #zmq_target_expire = 120 # Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. # (boolean value) #use_pub_sub = true # Minimal port number for random ports range. 
(port value) # Minimum value: 0 # Maximum value: 65535 #rpc_zmq_min_port = 49152 # Maximal port number for random ports range. (integer value) # Minimum value: 1 # Maximum value: 65536 #rpc_zmq_max_port = 65536 # Number of retries to find free port number before fail with # ZMQBindError. (integer value) #rpc_zmq_bind_port_retries = 100 # Size of executor thread pool. (integer value) # Deprecated group/name - [DEFAULT]/rpc_thread_pool_size #executor_thread_pool_size = 64 # Seconds to wait for a response from a call. (integer value) #rpc_response_timeout = 60 # A URL representing the messaging driver to use and its full # configuration. If not set, we fall back to the rpc_backend option # and driver specific configuration. (string value) #transport_url = # The messaging driver to use, defaults to rabbit. Other drivers # include amqp and zmq. (string value) #rpc_backend = rabbit # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the transport_url # option. (string value) #control_exchange = openstack [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. # (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. 
To use whatever SQL # mode is set by the server configuration, set this to no value. # Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. # (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. 
(integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection # lost. (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. # (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false [glance_store] # # From glance.store # # List of stores enabled. Valid stores are: cinder, file, http, rbd, # sheepdog, swift, s3, vsphere (list value) #stores = file,http # Default scheme to use to store image data. The scheme must be # registered by one of the stores defined by the 'stores' config # option. (string value) #default_store = file # Minimum interval seconds to execute updating dynamic storage # capabilities based on backend status then. It's not a periodic # routine, the update logic will be executed only when interval # seconds elapsed and an operation of store has triggered. The feature # will be enabled only when the option value greater then zero. # (integer value) #store_capabilities_update_min_interval = 0 # Specify the path to the CA bundle file to use in verifying the # remote server certificate. (string value) #https_ca_certificates_file = # If true, the remote server certificate is not verified. If false, # then the default CA truststore is used for verification. 
This option # is ignored if "https_ca_certificates_file" is set. (boolean value) #https_insecure = true # Specify the http/https proxy information that should be used to # connect to the remote server. The proxy information should be a key # value pair of the scheme and proxy. e.g. http:10.0.0.1:3128. You can # specify proxies for multiple schemes by separating the key value # pairs with a comma. e.g. http:10.0.0.1:3128, https:10.0.0.1:1080. # (dict value) #http_proxy_information = # If True, swiftclient won't check for a valid SSL certificate when # authenticating. (boolean value) #swift_store_auth_insecure = false # A string giving the CA certificate file to use in SSL connections # for verifying certs. (string value) #swift_store_cacert = # The region of the swift endpoint to be used for single tenant. This # setting is only necessary if the tenant has multiple swift # endpoints. (string value) #swift_store_region = # If set, the configured endpoint will be used. If None, the storage # url from the auth response will be used. (string value) #swift_store_endpoint = # A string giving the endpoint type of the swift service to use # (publicURL, adminURL or internalURL). This setting is only used if # swift_store_auth_version is 2. (string value) #swift_store_endpoint_type = publicURL # A string giving the service type of the swift service to use. This # setting is only used if swift_store_auth_version is 2. (string # value) #swift_store_service_type = object-store # Container within the account that the account should use for storing # images in Swift when using single container mode. In multiple # container mode, this will be the prefix for all containers. (string # value) #swift_store_container = glance # The size, in MB, that Glance will start chunking image files and do # a large object manifest in Swift. (integer value) #swift_store_large_object_size = 5120 # The amount of data written to a temporary disk buffer during the # process of chunking the image file. 
(integer value) #swift_store_large_object_chunk_size = 200 # A boolean value that determines if we create the container if it # does not exist. (boolean value) #swift_store_create_container_on_put = false # If set to True, enables multi-tenant storage mode which causes # Glance images to be stored in tenant specific Swift accounts. # (boolean value) #swift_store_multi_tenant = false # When set to 0, a single-tenant store will only use one container to # store all images. When set to an integer value between 1 and 32, a # single-tenant store will use multiple containers to store images, # and this value will determine how many containers are created. Used # only when swift_store_multi_tenant is disabled. The total number of # containers that will be used is equal to 16^N, so if this config # option is set to 2, then 16^2=256 containers will be used to store # images. (integer value) #swift_store_multiple_containers_seed = 0 # A list of tenants that will be granted read/write access on all # Swift containers created by Glance in multi-tenant mode. (list # value) #swift_store_admin_tenants = # If set to False, disables SSL layer compression of https swift # requests. Setting to False may improve performance for images which # are already in a compressed format, eg qcow2. (boolean value) #swift_store_ssl_compression = true # The number of times a Swift download will be retried before the # request fails. (integer value) #swift_store_retry_get_count = 0 # The period of time (in seconds) before token expiration when # glance_store will try to request new user token. Default value 60 sec # means that if token is going to expire in 1 min then glance_store # request new user token. (integer value) #swift_store_expire_soon_interval = 60 # If set to True create a trust for each add/get request to Multi- # tenant store in order to prevent authentication token to be expired # during uploading/downloading data. 
If set to False then user token # is used for Swift connection (so no overhead on trust creation). # Please note that this option is considered only and only if # swift_store_multi_tenant=True (boolean value) #swift_store_use_trusts = true # The reference to the default swift account/backing store parameters # to use for adding new images. (string value) #default_swift_reference = ref1 # Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. # (deprecated - use "auth_version" in swift_store_config_file) (string # value) #swift_store_auth_version = 2 # The address where the Swift authentication service is listening. # (deprecated - use "auth_address" in swift_store_config_file) (string # value) #swift_store_auth_address = # The user to authenticate against the Swift authentication service # (deprecated - use "user" in swift_store_config_file) (string value) #swift_store_user = # Auth key for the user authenticating against the Swift # authentication service. (deprecated - use "key" in # swift_store_config_file) (string value) #swift_store_key = # The config file that has the swift account(s)configs. (string value) #swift_store_config_file = # RADOS images will be chunked into objects of this size (in # megabytes). For best performance, this should be a power of two. # (integer value) #rbd_store_chunk_size = 8 # RADOS pool in which images are stored. (string value) #rbd_store_pool = images # RADOS user to authenticate as (only applicable if using Cephx. If # , a default will be chosen based on the client. section in # rbd_store_ceph_conf) (string value) #rbd_store_user = # Ceph configuration file path. If , librados will locate the # default config. If using cephx authentication, this file should # include a reference to the right keyring in a client. section # (string value) #rbd_store_ceph_conf = /etc/ceph/ceph.conf # Timeout value (in seconds) used when connecting to ceph cluster. 
If # value <= 0, no timeout is set and default librados value is used. # (integer value) #rados_connect_timeout = 0 # Info to match when looking for cinder in the service catalog. Format # is : separated values of the form: # :: (string value) #cinder_catalog_info = volumev2::publicURL # Override service catalog lookup with template for cinder endpoint # e.g. http://localhost:8776/v2/%(tenant)s (string value) #cinder_endpoint_template = # Region name of this node. If specified, it will be used to locate # OpenStack services for stores. (string value) # Deprecated group/name - [DEFAULT]/os_region_name #cinder_os_region_name = # Location of ca certificates file to use for cinder client requests. # (string value) #cinder_ca_certificates_file = # Number of cinderclient retries on failed http calls (integer value) #cinder_http_retries = 3 # Time period in seconds to wait for a cinder volume # transition to complete. (integer value) #cinder_state_transition_timeout = 300 # Allow to perform insecure SSL requests to cinder (boolean value) #cinder_api_insecure = false # The address where the Cinder authentication service is listening. If # , the cinder endpoint in the service catalog is used. (string # value) #cinder_store_auth_address = # User name to authenticate against Cinder. If , the user of # current context is used. (string value) #cinder_store_user_name = # Password for the user authenticating against Cinder. If , the # current context auth token is used. (string value) #cinder_store_password = # Project name where the image is stored in Cinder. If , the # project in current context is used. (string value) #cinder_store_project_name = # Path to the rootwrap configuration file to use for running commands # as root. (string value) #rootwrap_config = /etc/glance/rootwrap.conf # The host where the S3 server is listening. (string value) #s3_store_host = # The S3 query token access key. (string value) #s3_store_access_key = # The S3 query token secret key. 
(string value) #s3_store_secret_key = # The S3 bucket to be used to store the Glance data. (string value) #s3_store_bucket = # The local directory where uploads will be staged before they are # transferred into S3. (string value) #s3_store_object_buffer_dir = # A boolean to determine if the S3 bucket should be created on upload # if it does not exist or if an error should be returned to the user. # (boolean value) #s3_store_create_bucket_on_put = false # The S3 calling format used to determine the bucket. Either subdomain # or path can be used. (string value) #s3_store_bucket_url_format = subdomain # What size, in MB, should S3 start chunking image files and do a # multipart upload in S3. (integer value) #s3_store_large_object_size = 100 # What multipart upload part size, in MB, should S3 use when uploading # parts. The size must be greater than or equal to 5M. (integer value) #s3_store_large_object_chunk_size = 10 # The number of thread pools to perform a multipart upload in S3. # (integer value) #s3_store_thread_pools = 10 # Enable the use of a proxy. (boolean value) #s3_store_enable_proxy = false # Address or hostname for the proxy server. (string value) #s3_store_proxy_host = # The port to use when connecting over a proxy. (integer value) #s3_store_proxy_port = 8080 # The username to connect to the proxy. (string value) #s3_store_proxy_user = # The password to use when connecting over a proxy. (string value) #s3_store_proxy_password = # Images will be chunked into objects of this size (in megabytes). For # best performance, this should be a power of two. (integer value) #sheepdog_store_chunk_size = 64 # Port of sheep daemon. (integer value) #sheepdog_store_port = 7000 # IP address of sheep daemon. (string value) #sheepdog_store_address = localhost # Directory to which the Filesystem backend store writes images. 
# (string value) #filesystem_store_datadir = /var/lib/glance/images # List of directories and its priorities to which the Filesystem # backend store writes images. (multi valued) #filesystem_store_datadirs = # The path to a file which contains the metadata to be returned with # any location associated with this store. The file must contain a # valid JSON object. The object should contain the keys 'id' and # 'mountpoint'. The value for both keys should be 'string'. (string # value) #filesystem_store_metadata_file = # The required permission for created image file. In this way the user # other service used, e.g. Nova, who consumes the image could be the # exclusive member of the group that owns the files created. Assigning # it less then or equal to zero means don't change the default # permission of the file. This value will be decoded as an octal # digit. (integer value) #filesystem_store_file_perm = 0 # ESX/ESXi or vCenter Server target system. The server value can be an # IP address or a DNS name. (string value) #vmware_server_host = # Username for authenticating with VMware ESX/VC server. (string # value) #vmware_server_username = # Password for authenticating with VMware ESX/VC server. (string # value) #vmware_server_password = # Number of times VMware ESX/VC server API must be retried upon # connection related issues. (integer value) #vmware_api_retry_count = 10 # The interval used for polling remote tasks invoked on VMware ESX/VC # server. (integer value) #vmware_task_poll_interval = 5 # The name of the directory where the glance images will be stored in # the VMware datastore. (string value) #vmware_store_image_dir = /openstack_glance # If true, the ESX/vCenter server certificate is not verified. If # false, then the default CA truststore is used for verification. This # option is ignored if "vmware_ca_file" is set. 
(boolean value) # Deprecated group/name - [DEFAULT]/vmware_api_insecure #vmware_insecure = false # Specify a CA bundle file to use in verifying the ESX/vCenter server # certificate. (string value) #vmware_ca_file = # A list of datastores where the image can be stored. This option may # be specified multiple times for specifying multiple datastores. The # datastore name should be specified after its datacenter path, # separated by ":". An optional weight may be given after the # datastore name, separated again by ":". Thus, the required format # becomes ::. When # adding an image, the datastore with highest weight will be selected, # unless there is not enough free space available in cases where the # image size is already known. If no weight is given, it is assumed to # be zero and the directory will be considered for selection last. If # multiple datastores have the same weight, then the one with the most # free space available is selected. (multi valued) #vmware_datastores = [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete public Identity API endpoint. (string value) #auth_uri = # API version of the admin Identity API endpoint. (string value) #auth_version = # Do not handle authorization requests within the middleware, but # delegate the authorization decision to downstream WSGI components. # (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server. # (integer value) #http_connect_timeout = # How many times are we trying to reconnect when communicating with # Identity API Server. (integer value) #http_request_max_retries = 3 # Env key for the swift cache. (string value) #cache = # Required if identity server requires client certificate (string # value) #certfile = # Required if identity server requires client certificate (string # value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs # connections. Defaults to system CAs. 
(string value) #cafile = # Verify HTTPS connections. (boolean value) #insecure = false # The region in which the identity server can be found. (string value) #region_name = # Directory used to cache files related to PKI tokens. (string value) #signing_dir = # Optionally specify a list of memcached server(s) to use for caching. # If left undefined, tokens will instead be cached in-process. (list # value) # Deprecated group/name - [DEFAULT]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the # middleware caches previously-seen tokens for a configurable duration # (in seconds). Set to -1 to disable caching completely. (integer # value) #token_cache_time = 300 # Determines the frequency at which the list of revoked tokens is # retrieved from the Identity service (in seconds). A high number of # revocation events combined with a low cache duration may # significantly reduce performance. (integer value) #revocation_cache_time = 10 # (Optional) If defined, indicate whether token data should be # authenticated or authenticated and encrypted. If MAC, token data is # authenticated (with HMAC) in the cache. If ENCRYPT, token data is # encrypted and authenticated in the cache. If the value is not one of # these options or empty, auth_token will raise an exception on # initialization. (string value) # Allowed values: None, MAC, ENCRYPT #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This # string is used for key derivation. (string value) #memcache_secret_key = # (Optional) Number of seconds memcached server is considered dead # before it is tried again. (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every # memcached server. (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a # memcached server. 
(integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held # unused in the pool before it is closed. (integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a # memcached client connection from the pool. (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool. # The advanced pool will only work under python 2.x. (boolean value) #memcache_use_advanced_pool = false # (Optional) Indicate whether to set the X-Service-Catalog header. If # False, middleware will not ask for service catalog on token # validation and will not set the X-Service-Catalog header. (boolean # value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: # "disabled" to not check token binding. "permissive" (default) to # validate binding information if the bind type is of a form known to # the server and ignore it if not. "strict" like "permissive" but if # the bind type is unknown the token will be rejected. "required" any # form of token binding is needed to be allowed. Finally the name of a # binding method that must be present in tokens. (string value) #enforce_token_bind = permissive # If true, the revocation list will be checked for cached tokens. This # requires that PKI tokens are configured on the identity server. # (boolean value) #check_revocations_for_cached = false # Hash algorithms to use for hashing PKI tokens. This may be a single # algorithm or multiple. The algorithms are those supported by Python # standard hashlib.new(). The hashes will be tried in the order given, # so put the preferred one first for performance. The result of the # first hash will be stored in the cache. This will typically be set # to multiple values only while migrating from a less secure algorithm # to a more secure one. 
Once all the old tokens are expired this # option should be set to a single value for better performance. (list # value) #hash_algorithms = md5 # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (unknown # value) #auth_section = [matchmaker_redis] # # From oslo.messaging # # Host to locate redis. (string value) #host = 127.0.0.1 # Use this port to connect to redis host. (port value) # Minimum value: 0 # Maximum value: 65535 #port = 6379 # Password for Redis server (optional). (string value) #password = # List of Redis Sentinel hosts (fault tolerance mode) e.g. # [host:port, host1:port ... ] (list value) #sentinel_hosts = # Redis replica set name. (string value) #sentinel_group_name = oslo-messaging-zeromq # Time in ms to wait between connection attempts. (integer value) #wait_timeout = 500 # Time in ms to wait before the transaction is killed. (integer value) #check_timeout = 20000 # Timeout in ms on blocking socket operations (integer value) #socket_timeout = 1000 [oslo_messaging_amqp] # # From oslo.messaging # # address prefix used when sending to a specific server (string value) # Deprecated group/name - [amqp1]/server_request_prefix #server_request_prefix = exclusive # address prefix used when broadcasting to all servers (string value) # Deprecated group/name - [amqp1]/broadcast_prefix #broadcast_prefix = broadcast # address prefix when sending to any server in group (string value) # Deprecated group/name - [amqp1]/group_request_prefix #group_request_prefix = unicast # Name for the AMQP container (string value) # Deprecated group/name - [amqp1]/container_name #container_name = # Timeout for inactive connections (in seconds) (integer value) # Deprecated group/name - [amqp1]/idle_timeout #idle_timeout = 0 # Debug: dump AMQP frames to stdout (boolean value) # Deprecated group/name - [amqp1]/trace #trace = false # CA certificate PEM file to verify 
server certificate (string value) # Deprecated group/name - [amqp1]/ssl_ca_file #ssl_ca_file = # Identifying certificate PEM file to present to clients (string # value) # Deprecated group/name - [amqp1]/ssl_cert_file #ssl_cert_file = # Private key PEM file used to sign cert_file certificate (string # value) # Deprecated group/name - [amqp1]/ssl_key_file #ssl_key_file = # Password for decrypting ssl_key_file (if encrypted) (string value) # Deprecated group/name - [amqp1]/ssl_key_password #ssl_key_password = # Accept clients using either SSL or plain TCP (boolean value) # Deprecated group/name - [amqp1]/allow_insecure_clients #allow_insecure_clients = false # Space separated list of acceptable SASL mechanisms (string value) # Deprecated group/name - [amqp1]/sasl_mechanisms #sasl_mechanisms = # Path to directory that contains the SASL configuration (string # value) # Deprecated group/name - [amqp1]/sasl_config_dir #sasl_config_dir = # Name of configuration file (without .conf suffix) (string value) # Deprecated group/name - [amqp1]/sasl_config_name #sasl_config_name = # User name for message broker authentication (string value) # Deprecated group/name - [amqp1]/username #username = # Password for message broker authentication (string value) # Deprecated group/name - [amqp1]/password #password = [oslo_messaging_notifications] # # From oslo.messaging # # The Drivers(s) to handle sending notifications. Possible values are # messaging, messagingv2, routing, log, test, noop (multi valued) # Deprecated group/name - [DEFAULT]/notification_driver #driver = # A URL representing the messaging driver to use for notifications. If # not set, we fall back to the same configuration used for RPC. # (string value) # Deprecated group/name - [DEFAULT]/notification_transport_url #transport_url = # AMQP topic used for OpenStack notifications. 
(list value) # Deprecated group/name - [rpc_notifier2]/topics # Deprecated group/name - [DEFAULT]/notification_topics #topics = notifications [oslo_messaging_rabbit] # # From oslo.messaging # # Use durable queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_durable_queues # Deprecated group/name - [DEFAULT]/rabbit_durable_queues #amqp_durable_queues = false # Auto-delete queues in AMQP. (boolean value) # Deprecated group/name - [DEFAULT]/amqp_auto_delete #amqp_auto_delete = false # SSL version to use (valid only if SSL enabled). Valid values are # TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be # available on some distributions. (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_version #kombu_ssl_version = # SSL key file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile #kombu_ssl_keyfile = # SSL cert file (valid only if SSL enabled). (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile #kombu_ssl_certfile = # SSL certification authority file (valid only if SSL enabled). # (string value) # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs #kombu_ssl_ca_certs = # How long to wait before reconnecting in response to an AMQP consumer # cancel notification. (floating point value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay #kombu_reconnect_delay = 1.0 # EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression # will not be used. This option may not be available in future # versions. (string value) #kombu_compression = # How long to wait for a missing client before abandoning to send it its # replies. This value should not be longer than rpc_response_timeout. # (integer value) # Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout #kombu_missing_consumer_retry_timeout = 60 # Determines how the next RabbitMQ node is chosen in case the one we # are currently connected to becomes unavailable. 
Takes effect only if # more than one RabbitMQ node is provided in config. (string value) # Allowed values: round-robin, shuffle #kombu_failover_strategy = round-robin # The RabbitMQ broker address where a single node is used. (string # value) # Deprecated group/name - [DEFAULT]/rabbit_host #rabbit_host = localhost # The RabbitMQ broker port where a single node is used. (port value) # Minimum value: 0 # Maximum value: 65535 # Deprecated group/name - [DEFAULT]/rabbit_port #rabbit_port = 5672 # RabbitMQ HA cluster host:port pairs. (list value) # Deprecated group/name - [DEFAULT]/rabbit_hosts #rabbit_hosts = $rabbit_host:$rabbit_port # Connect over SSL for RabbitMQ. (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_use_ssl #rabbit_use_ssl = false # The RabbitMQ userid. (string value) # Deprecated group/name - [DEFAULT]/rabbit_userid #rabbit_userid = guest # The RabbitMQ password. (string value) # Deprecated group/name - [DEFAULT]/rabbit_password #rabbit_password = guest # The RabbitMQ login method. (string value) # Deprecated group/name - [DEFAULT]/rabbit_login_method #rabbit_login_method = AMQPLAIN # The RabbitMQ virtual host. (string value) # Deprecated group/name - [DEFAULT]/rabbit_virtual_host #rabbit_virtual_host = / # How frequently to retry connecting with RabbitMQ. (integer value) #rabbit_retry_interval = 1 # How long to backoff for between retries when connecting to RabbitMQ. # (integer value) # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff #rabbit_retry_backoff = 2 # Maximum interval of RabbitMQ connection retries. Default is 30 # seconds. (integer value) #rabbit_interval_max = 30 # Maximum number of RabbitMQ connection retries. Default is 0 # (infinite retry count). (integer value) # Deprecated group/name - [DEFAULT]/rabbit_max_retries #rabbit_max_retries = 0 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change # this option, you must wipe the RabbitMQ database. 
In RabbitMQ 3.0, # queue mirroring is no longer controlled by the x-ha-policy argument # when declaring a queue. If you just want to make sure that all # queues (except those with auto-generated names) are mirrored across # all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha- # mode": "all"}' " (boolean value) # Deprecated group/name - [DEFAULT]/rabbit_ha_queues #rabbit_ha_queues = false # Positive integer representing duration in seconds for queue TTL # (x-expires). Queues which are unused for the duration of the TTL are # automatically deleted. The parameter affects only reply and fanout # queues. (integer value) # Minimum value: 1 #rabbit_transient_queues_ttl = 600 # Specifies the number of messages to prefetch. Setting to zero allows # unlimited messages. (integer value) #rabbit_qos_prefetch_count = 0 # Number of seconds after which the Rabbit broker is considered down # if heartbeat's keep-alive fails (0 disable the heartbeat). # EXPERIMENTAL (integer value) #heartbeat_timeout_threshold = 60 # How often times during the heartbeat_timeout_threshold we check the # heartbeat. 
(integer value) #heartbeat_rate = 2 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake # (boolean value) # Deprecated group/name - [DEFAULT]/fake_rabbit #fake_rabbit = false # Maximum number of channels to allow (integer value) #channel_max = # The maximum byte size for an AMQP frame (integer value) #frame_max = # How often to send heartbeats for consumer's connections (integer # value) #heartbeat_interval = 1 # Enable SSL (boolean value) #ssl = # Arguments passed to ssl.wrap_socket (dict value) #ssl_options = # Set socket timeout in seconds for connection's socket (floating # point value) #socket_timeout = 0.25 # Set TCP_USER_TIMEOUT in seconds for connection's socket (floating # point value) #tcp_user_timeout = 0.25 # Set delay for reconnection to some host which has connection error # (floating point value) #host_connection_reconnect_delay = 0.25 # Maximum number of connections to keep queued. (integer value) #pool_max_size = 10 # Maximum number of connections to create above `pool_max_size`. # (integer value) #pool_max_overflow = 0 # Default number of seconds to wait for a connections to available # (integer value) #pool_timeout = 30 # Lifetime of a connection (since creation) in seconds or None for no # recycling. Expired connections are closed on acquire. (integer # value) #pool_recycle = 600 # Threshold at which inactive (since release) connections are # considered stale in seconds or None for no staleness. Stale # connections are closed on acquire. (integer value) #pool_stale = 60 # Persist notification messages. (boolean value) #notification_persistence = false # Exchange name for for sending notifications (string value) #default_notification_exchange = ${control_exchange}_notification # Max number of not acknowledged message which RabbitMQ can send to # notification listener. 
(integer value) #notification_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending notification, -1 means infinite retry. (integer value) #default_notification_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending notification message (floating point value) #notification_retry_delay = 0.25 # Time to live for rpc queues without consumers in seconds. (integer # value) #rpc_queue_expiration = 60 # Exchange name for sending RPC messages (string value) #default_rpc_exchange = ${control_exchange}_rpc # Exchange name for receiving RPC replies (string value) #rpc_reply_exchange = ${control_exchange}_rpc_reply # Max number of not acknowledged message which RabbitMQ can send to # rpc listener. (integer value) #rpc_listener_prefetch_count = 100 # Max number of not acknowledged message which RabbitMQ can send to # rpc reply listener. (integer value) #rpc_reply_listener_prefetch_count = 100 # Reconnecting retry count in case of connectivity problem during # sending reply. -1 means infinite retry during rpc_timeout (integer # value) #rpc_reply_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending reply. (floating point value) #rpc_reply_retry_delay = 0.25 # Reconnecting retry count in case of connectivity problem during # sending RPC message, -1 means infinite retry. If actual retry # attempts in not 0 the rpc request could be processed more then one # time (integer value) #default_rpc_retry_attempts = -1 # Reconnecting retry delay in case of connectivity problem during # sending RPC message (floating point value) #rpc_retry_delay = 0.25 [oslo_policy] # # From oslo.policy # # The JSON file that defines policies. (string value) # Deprecated group/name - [DEFAULT]/policy_file #policy_file = policy.json # Default rule. Enforced when a requested rule is not found. 
(string # value) # Deprecated group/name - [DEFAULT]/policy_default_rule #policy_default_rule = default # Directories where policy configuration files are stored. They can be # relative to any directory in the search path defined by the # config_dir option, or absolute paths. The file defined by # policy_file must exist for these directories to be searched. # Missing or empty directories are ignored. (multi valued) # Deprecated group/name - [DEFAULT]/policy_dirs #policy_dirs = policy.d [paste_deploy] # # From glance.registry # # Partial name of a pipeline in your paste configuration file with the # service name removed. For example, if your paste section name is # [pipeline:glance-api-keystone] use the value "keystone" (string # value) #flavor = # Name of the paste configuration file. (string value) #config_file = [profiler] # # From glance.registry # # If False fully disable profiling feature. (boolean value) #enabled = false # If False doesn't trace SQL requests. (boolean value) #trace_sqlalchemy = false # Secret key to use to sign Glance API and Glance Registry services # tracing messages. 
(string value) #hmac_keys = SECRET_KEY glance-12.0.0/etc/oslo-config-generator/0000775000567000056710000000000012701407204021137 5ustar jenkinsjenkins00000000000000glance-12.0.0/etc/oslo-config-generator/glance-api.conf0000664000567000056710000000050712701407047024015 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-api.conf.sample namespace = glance.api namespace = glance.store namespace = oslo.concurrency namespace = oslo.messaging namespace = oslo.db namespace = oslo.db.concurrency namespace = oslo.policy namespace = keystonemiddleware.auth_token namespace = oslo.log namespace = oslo.middleware.cors glance-12.0.0/etc/oslo-config-generator/glance-registry.conf0000664000567000056710000000042312701407047025111 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-registry.conf.sample namespace = glance.registry namespace = glance.store namespace = oslo.messaging namespace = oslo.db namespace = oslo.db.concurrency namespace = oslo.policy namespace = keystonemiddleware.auth_token namespace = oslo.log glance-12.0.0/etc/oslo-config-generator/glance-glare.conf0000664000567000056710000000037312701407047024337 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-glare.conf.sample namespace = glance.glare namespace = glance.store namespace = oslo.db namespace = oslo.db.concurrency namespace = keystonemiddleware.auth_token namespace = oslo.log namespace = oslo.middleware.cors glance-12.0.0/etc/oslo-config-generator/glance-scrubber.conf0000664000567000056710000000032212701407047025046 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-scrubber.conf.sample namespace = glance.scrubber namespace = oslo.concurrency namespace = oslo.db namespace = oslo.db.concurrency namespace = oslo.log namespace = oslo.policy glance-12.0.0/etc/oslo-config-generator/glance-manage.conf0000664000567000056710000000023112701407047024466 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-manage.conf.sample 
namespace = glance.manage namespace = oslo.db namespace = oslo.db.concurrency namespace = oslo.log glance-12.0.0/etc/oslo-config-generator/glance-cache.conf0000664000567000056710000000017312701407047024306 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/glance-cache.conf.sample namespace = glance.cache namespace = oslo.log namespace = oslo.policy glance-12.0.0/etc/glance-glare-paste.ini0000664000567000056710000000435612701407047021105 0ustar jenkinsjenkins00000000000000# Use this pipeline for no auth - DEFAULT [pipeline:glare-api] pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context rootapp # Use this pipeline for keystone auth [pipeline:glare-api-keystone] pipeline = cors healthcheck versionnegotiation osprofiler authtoken context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v0.1: glareapi [app:apiversions] paste.app_factory = glance.api.glare.versions:create_resource [app:glareapi] paste.app_factory = glance.api.glare.v0_1.router:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/glance/healthcheck_disable [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:GlareVersionNegotiationFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory delay_auth_decision = true [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = glance oslo_config_program = glance-glare # Basic Headers (Automatic) # Accept = Origin, Accept, Accept-Language, Content-Type, 
Cache-Control, Content-Language, Expires, Last-Modified, Pragma # Expose = Origin, Accept, Accept-Language, Content-Type, Cache-Control, Content-Language, Expires, Last-Modified, Pragma # Glance Headers # Accept = Content-MD5, Accept-Encoding # Keystone Headers # Accept = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id # Expose = X-Auth-Token, X-Subject-Token, X-Service-Token # Request ID Middleware Headers # Accept = X-OpenStack-Request-ID # Expose = X-OpenStack-Request-ID latent_allow_headers = Content-MD5, Accept-Encoding, X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID glance-12.0.0/etc/policy.json0000664000567000056710000000250712701407047017137 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "default": "", "add_image": "", "delete_image": "", "get_image": "", "get_images": "", "modify_image": "", "publicize_image": "role:admin", "copy_from": "", "download_image": "", "upload_image": "", "delete_image_location": "", "get_image_location": "", "set_image_location": "", "add_member": "", "delete_member": "", "get_member": "", "get_members": "", "modify_member": "", "manage_image_cache": "role:admin", "get_task": "role:admin", "get_tasks": "role:admin", "add_task": "role:admin", "modify_task": "role:admin", "deactivate": "", "reactivate": "", "get_metadef_namespace": "", "get_metadef_namespaces":"", "modify_metadef_namespace":"", "add_metadef_namespace":"", "get_metadef_object":"", "get_metadef_objects":"", "modify_metadef_object":"", "add_metadef_object":"", "list_metadef_resource_types":"", "get_metadef_resource_type":"", "add_metadef_resource_type_association":"", "get_metadef_property":"", "get_metadef_properties":"", "modify_metadef_property":"", "add_metadef_property":"", "get_metadef_tag":"", "get_metadef_tags":"", "modify_metadef_tag":"", 
"add_metadef_tag":"", "add_metadef_tags":"" } glance-12.0.0/etc/glance-api-paste.ini0000664000567000056710000000630212701407047020555 0ustar jenkinsjenkins00000000000000# Use this pipeline for no auth or image caching - DEFAULT [pipeline:glance-api] pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context rootapp # Use this pipeline for image caching and no auth [pipeline:glance-api-caching] pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache rootapp # Use this pipeline for caching w/ management interface but no auth [pipeline:glance-api-cachemanagement] pipeline = cors healthcheck versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp # Use this pipeline for keystone auth [pipeline:glance-api-keystone] pipeline = cors healthcheck versionnegotiation osprofiler authtoken context rootapp # Use this pipeline for keystone auth with image caching [pipeline:glance-api-keystone+caching] pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache rootapp # Use this pipeline for keystone auth with caching and cache management [pipeline:glance-api-keystone+cachemanagement] pipeline = cors healthcheck versionnegotiation osprofiler authtoken context cache cachemanage rootapp # Use this pipeline for authZ only. This means that the registry will treat a # user as authenticated without making requests to keystone to reauthenticate # the user. [pipeline:glance-api-trusted-auth] pipeline = cors healthcheck versionnegotiation osprofiler context rootapp # Use this pipeline for authZ only. 
This means that the registry will treat a # user as authenticated without making requests to keystone to reauthenticate # the user and uses cache management [pipeline:glance-api-trusted-auth+cachemanagement] pipeline = cors healthcheck versionnegotiation osprofiler context cache cachemanage rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v1: apiv1app /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv1app] paste.app_factory = glance.api.v1.router:API.factory [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/glance/healthcheck_disable [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cachemanage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory delay_auth_decision = true [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory hmac_keys = SECRET_KEY #DEPRECATED enabled = yes #DEPRECATED [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = glance oslo_config_program = glance-api glance-12.0.0/etc/glance-glare.conf0000664000567000056710000010112012701407047020124 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From glance.glare # # When true, this option sets the 
owner of an image to be the tenant. # Otherwise, the owner of the image will be the authenticated user # issuing the request. (boolean value) #owner_is_tenant = true # Role used to identify an authenticated user as administrator. # (string value) #admin_role = admin # Allow unauthenticated users to access the API with read-only # privileges. This only applies when using ContextMiddleware. (boolean # value) #allow_anonymous_access = false # Limits request ID length. (integer value) #max_request_id_length = 64 # Public url to use for versions endpoint. The default is None, which # will use the request's host_url attribute to populate the URL base. # If Glance is operating behind a proxy, you will want to change this # to represent the proxy's URL. (string value) #public_endpoint = # Address to bind the server. Useful when selecting a particular # network interface. (string value) #bind_host = 0.0.0.0 # The port on which the server will listen. (port value) # Minimum value: 0 # Maximum value: 65535 #bind_port = # The number of child process workers that will be created to service # requests. The default will be equal to the number of CPUs available. # (integer value) #workers = # Maximum line size of message headers to be accepted. max_header_line # may need to be increased when using large tokens (typically those # generated by the Keystone v3 API with big service catalogs (integer # value) #max_header_line = 16384 # If False, server will return the header "Connection: close", If # True, server will return "Connection: Keep-Alive" in its responses. # In order to close the client socket connection explicitly after the # response is sent and read successfully by the client, you simply # have to set this option to False when you create a wsgi server. # (boolean value) #http_keepalive = true # Timeout for client connections' socket operations. If an incoming # connection is idle for this number of seconds it will be closed. A # value of '0' means wait forever. 
(integer value) #client_socket_timeout = 900 # The backlog value that will be used when creating the TCP listener # socket. (integer value) #backlog = 4096 # The value for the socket option TCP_KEEPIDLE. This is the time in # seconds that the connection must be idle before TCP starts sending # keepalive probes. (integer value) #tcp_keepidle = 600 # CA certificate file to use to verify connecting clients. (string # value) #ca_file = # Certificate file to use when starting API server securely. (string # value) #cert_file = # Private key file to use when starting API server securely. (string # value) #key_file = # If False fully disable profiling feature. (boolean value) #enabled = false # If False doesn't trace SQL requests. (boolean value) #trace_sqlalchemy = false # Secret key to use to sign Glance API and Glance Registry services # tracing messages. (string value) #hmac_keys = SECRET_KEY # Default publisher_id for outgoing notifications. (string value) #default_publisher_id = image.localhost # List of disabled notifications. A notification can be given either # as a notification type to disable a single event, or as a # notification group prefix to disable all events within a group. # Example: if this config option is set to ["image.create", # "metadef_namespace"], then "image.create" notification will not be # sent after image is created and none of the notifications for # metadefinition namespaces will be sent. (list value) #disabled_notifications = # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. 
For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. # (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if # log_config_append is set. (boolean value) #use_stderr = true # Format string to use for log messages with context. 
(string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. # (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. # (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. 
(boolean value) #fatal_deprecations = false [cors] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual # request. (list value) #allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID [cors.subdomain] # # From oslo.middleware.cors # # Indicate whether this resource may be shared with the domain # received in the requests "origin" header. (list value) #allowed_origin = # Indicate that the actual request can include user credentials # (boolean value) #allow_credentials = true # Indicate which headers are safe to expose to the API. Defaults to # HTTP Simple Headers. (list value) #expose_headers = X-Image-Meta-Checksum,X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID # Maximum cache age of CORS preflight requests. (integer value) #max_age = 3600 # Indicate which methods can be used during the actual request. (list # value) #allow_methods = GET,PUT,POST,DELETE,PATCH # Indicate which header field names may be used during the actual # request. 
(list value) #allow_headers = Content-MD5,X-Image-Meta-Checksum,X-Storage-Token,Accept-Encoding,X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. # (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. To use whatever SQL # mode is set by the server configuration, set this to no value. # Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. 
Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. # (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection # lost. (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. 
# (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false [glance_store] # # From glance.store # # List of stores enabled. Valid stores are: cinder, file, http, rbd, # sheepdog, swift, s3, vsphere (list value) #stores = file,http # Default scheme to use to store image data. The scheme must be # registered by one of the stores defined by the 'stores' config # option. (string value) #default_store = file # Minimum interval seconds to execute updating dynamic storage # capabilities based on backend status then. It's not a periodic # routine, the update logic will be executed only when interval # seconds elapsed and an operation of store has triggered. The feature # will be enabled only when the option value greater then zero. # (integer value) #store_capabilities_update_min_interval = 0 # Specify the path to the CA bundle file to use in verifying the # remote server certificate. (string value) #https_ca_certificates_file = # If true, the remote server certificate is not verified. If false, # then the default CA truststore is used for verification. This option # is ignored if "https_ca_certificates_file" is set. (boolean value) #https_insecure = true # Specify the http/https proxy information that should be used to # connect to the remote server. The proxy information should be a key # value pair of the scheme and proxy. e.g. http:10.0.0.1:3128. You can # specify proxies for multiple schemes by seperating the key value # pairs with a comma.e.g. http:10.0.0.1:3128, https:10.0.0.1:1080. # (dict value) #http_proxy_information = # If True, swiftclient won't check for a valid SSL certificate when # authenticating. (boolean value) #swift_store_auth_insecure = false # A string giving the CA certificate file to use in SSL connections # for verifying certs. 
(string value) #swift_store_cacert = # The region of the swift endpoint to be used for single tenant. This # setting is only necessary if the tenant has multiple swift # endpoints. (string value) #swift_store_region = # If set, the configured endpoint will be used. If None, the storage # url from the auth response will be used. (string value) #swift_store_endpoint = # A string giving the endpoint type of the swift service to use # (publicURL, adminURL or internalURL). This setting is only used if # swift_store_auth_version is 2. (string value) #swift_store_endpoint_type = publicURL # A string giving the service type of the swift service to use. This # setting is only used if swift_store_auth_version is 2. (string # value) #swift_store_service_type = object-store # Container within the account that the account should use for storing # images in Swift when using single container mode. In multiple # container mode, this will be the prefix for all containers. (string # value) #swift_store_container = glance # The size, in MB, that Glance will start chunking image files and do # a large object manifest in Swift. (integer value) #swift_store_large_object_size = 5120 # The amount of data written to a temporary disk buffer during the # process of chunking the image file. (integer value) #swift_store_large_object_chunk_size = 200 # A boolean value that determines if we create the container if it # does not exist. (boolean value) #swift_store_create_container_on_put = false # If set to True, enables multi-tenant storage mode which causes # Glance images to be stored in tenant specific Swift accounts. # (boolean value) #swift_store_multi_tenant = false # When set to 0, a single-tenant store will only use one container to # store all images. When set to an integer value between 1 and 32, a # single-tenant store will use multiple containers to store images, # and this value will determine how many containers are created.Used # only when swift_store_multi_tenant is disabled. 
The total number of # containers that will be used is equal to 16^N, so if this config # option is set to 2, then 16^2=256 containers will be used to store # images. (integer value) #swift_store_multiple_containers_seed = 0 # A list of tenants that will be granted read/write access on all # Swift containers created by Glance in multi-tenant mode. (list # value) #swift_store_admin_tenants = # If set to False, disables SSL layer compression of https swift # requests. Setting to False may improve performance for images which # are already in a compressed format, eg qcow2. (boolean value) #swift_store_ssl_compression = true # The number of times a Swift download will be retried before the # request fails. (integer value) #swift_store_retry_get_count = 0 # The period of time (in seconds) before token expirationwhen # glance_store will try to reques new user token. Default value 60 sec # means that if token is going to expire in 1 min then glance_store # request new user token. (integer value) #swift_store_expire_soon_interval = 60 # If set to True create a trust for each add/get request to Multi- # tenant store in order to prevent authentication token to be expired # during uploading/downloading data. If set to False then user token # is used for Swift connection (so no overhead on trust creation). # Please note that this option is considered only and only if # swift_store_multi_tenant=True (boolean value) #swift_store_use_trusts = true # The reference to the default swift account/backing store parameters # to use for adding new images. (string value) #default_swift_reference = ref1 # Version of the authentication service to use. Valid versions are 2 # and 3 for keystone and 1 (deprecated) for swauth and rackspace. # (deprecated - use "auth_version" in swift_store_config_file) (string # value) #swift_store_auth_version = 2 # The address where the Swift authentication service is listening. 
# (deprecated - use "auth_address" in swift_store_config_file) (string # value) #swift_store_auth_address = # The user to authenticate against the Swift authentication service # (deprecated - use "user" in swift_store_config_file) (string value) #swift_store_user = # Auth key for the user authenticating against the Swift # authentication service. (deprecated - use "key" in # swift_store_config_file) (string value) #swift_store_key = # The config file that has the swift account(s)configs. (string value) #swift_store_config_file = # RADOS images will be chunked into objects of this size (in # megabytes). For best performance, this should be a power of two. # (integer value) #rbd_store_chunk_size = 8 # RADOS pool in which images are stored. (string value) #rbd_store_pool = images # RADOS user to authenticate as (only applicable if using Cephx. If # , a default will be chosen based on the client. section in # rbd_store_ceph_conf) (string value) #rbd_store_user = # Ceph configuration file path. If , librados will locate the # default config. If using cephx authentication, this file should # include a reference to the right keyring in a client. section # (string value) #rbd_store_ceph_conf = /etc/ceph/ceph.conf # Timeout value (in seconds) used when connecting to ceph cluster. If # value <= 0, no timeout is set and default librados value is used. # (integer value) #rados_connect_timeout = 0 # Info to match when looking for cinder in the service catalog. Format # is : separated values of the form: # :: (string value) #cinder_catalog_info = volumev2::publicURL # Override service catalog lookup with template for cinder endpoint # e.g. http://localhost:8776/v2/%(tenant)s (string value) #cinder_endpoint_template = # Region name of this node. If specified, it will be used to locate # OpenStack services for stores. (string value) # Deprecated group/name - [DEFAULT]/os_region_name #cinder_os_region_name = # Location of ca certicates file to use for cinder client requests. 
# (string value) #cinder_ca_certificates_file = # Number of cinderclient retries on failed http calls (integer value) #cinder_http_retries = 3 # Time period of time in seconds to wait for a cinder volume # transition to complete. (integer value) #cinder_state_transition_timeout = 300 # Allow to perform insecure SSL requests to cinder (boolean value) #cinder_api_insecure = false # The address where the Cinder authentication service is listening. If # , the cinder endpoint in the service catalog is used. (string # value) #cinder_store_auth_address = # User name to authenticate against Cinder. If , the user of # current context is used. (string value) #cinder_store_user_name = # Password for the user authenticating against Cinder. If , the # current context auth token is used. (string value) #cinder_store_password = # Project name where the image is stored in Cinder. If , the # project in current context is used. (string value) #cinder_store_project_name = # Path to the rootwrap configuration file to use for running commands # as root. (string value) #rootwrap_config = /etc/glance/rootwrap.conf # The host where the S3 server is listening. (string value) #s3_store_host = # The S3 query token access key. (string value) #s3_store_access_key = # The S3 query token secret key. (string value) #s3_store_secret_key = # The S3 bucket to be used to store the Glance data. (string value) #s3_store_bucket = # The local directory where uploads will be staged before they are # transferred into S3. (string value) #s3_store_object_buffer_dir = # A boolean to determine if the S3 bucket should be created on upload # if it does not exist or if an error should be returned to the user. # (boolean value) #s3_store_create_bucket_on_put = false # The S3 calling format used to determine the bucket. Either subdomain # or path can be used. (string value) #s3_store_bucket_url_format = subdomain # What size, in MB, should S3 start chunking image files and do a # multipart upload in S3. 
(integer value) #s3_store_large_object_size = 100 # What multipart upload part size, in MB, should S3 use when uploading # parts. The size must be greater than or equal to 5M. (integer value) #s3_store_large_object_chunk_size = 10 # The number of thread pools to perform a multipart upload in S3. # (integer value) #s3_store_thread_pools = 10 # Enable the use of a proxy. (boolean value) #s3_store_enable_proxy = false # Address or hostname for the proxy server. (string value) #s3_store_proxy_host = # The port to use when connecting over a proxy. (integer value) #s3_store_proxy_port = 8080 # The username to connect to the proxy. (string value) #s3_store_proxy_user = # The password to use when connecting over a proxy. (string value) #s3_store_proxy_password = # Images will be chunked into objects of this size (in megabytes). For # best performance, this should be a power of two. (integer value) #sheepdog_store_chunk_size = 64 # Port of sheep daemon. (integer value) #sheepdog_store_port = 7000 # IP address of sheep daemon. (string value) #sheepdog_store_address = localhost # Directory to which the Filesystem backend store writes images. # (string value) #filesystem_store_datadir = /var/lib/glance/images # List of directories and its priorities to which the Filesystem # backend store writes images. (multi valued) #filesystem_store_datadirs = # The path to a file which contains the metadata to be returned with # any location associated with this store. The file must contain a # valid JSON object. The object should contain the keys 'id' and # 'mountpoint'. The value for both keys should be 'string'. (string # value) #filesystem_store_metadata_file = # The required permission for created image file. In this way the user # other service used, e.g. Nova, who consumes the image could be the # exclusive member of the group that owns the files created. Assigning # it less then or equal to zero means don't change the default # permission of the file. 
This value will be decoded as an octal # digit. (integer value) #filesystem_store_file_perm = 0 # ESX/ESXi or vCenter Server target system. The server value can be an # IP address or a DNS name. (string value) #vmware_server_host = # Username for authenticating with VMware ESX/VC server. (string # value) #vmware_server_username = # Password for authenticating with VMware ESX/VC server. (string # value) #vmware_server_password = # Number of times VMware ESX/VC server API must be retried upon # connection related issues. (integer value) #vmware_api_retry_count = 10 # The interval used for polling remote tasks invoked on VMware ESX/VC # server. (integer value) #vmware_task_poll_interval = 5 # The name of the directory where the glance images will be stored in # the VMware datastore. (string value) #vmware_store_image_dir = /openstack_glance # If true, the ESX/vCenter server certificate is not verified. If # false, then the default CA truststore is used for verification. This # option is ignored if "vmware_ca_file" is set. (boolean value) # Deprecated group/name - [DEFAULT]/vmware_api_insecure #vmware_insecure = false # Specify a CA bundle file to use in verifying the ESX/vCenter server # certificate. (string value) #vmware_ca_file = # A list of datastores where the image can be stored. This option may # be specified multiple times for specifying multiple datastores. The # datastore name should be specified after its datacenter path, # seperated by ":". An optional weight may be given after the # datastore name, seperated again by ":". Thus, the required format # becomes ::. When # adding an image, the datastore with highest weight will be selected, # unless there is not enough free space available in cases where the # image size is already known. If no weight is given, it is assumed to # be zero and the directory will be considered for selection last. If # multiple datastores have the same weight, then the one with the most # free space available is selected. 
(multi valued) #vmware_datastores = [keystone_authtoken] # # From keystonemiddleware.auth_token # # Complete public Identity API endpoint. (string value) #auth_uri = # API version of the admin Identity API endpoint. (string value) #auth_version = # Do not handle authorization requests within the middleware, but # delegate the authorization decision to downstream WSGI components. # (boolean value) #delay_auth_decision = false # Request timeout value for communicating with Identity API server. # (integer value) #http_connect_timeout = # How many times are we trying to reconnect when communicating with # Identity API Server. (integer value) #http_request_max_retries = 3 # Env key for the swift cache. (string value) #cache = # Required if identity server requires client certificate (string # value) #certfile = # Required if identity server requires client certificate (string # value) #keyfile = # A PEM encoded Certificate Authority to use when verifying HTTPs # connections. Defaults to system CAs. (string value) #cafile = # Verify HTTPS connections. (boolean value) #insecure = false # The region in which the identity server can be found. (string value) #region_name = # Directory used to cache files related to PKI tokens. (string value) #signing_dir = # Optionally specify a list of memcached server(s) to use for caching. # If left undefined, tokens will instead be cached in-process. (list # value) # Deprecated group/name - [DEFAULT]/memcache_servers #memcached_servers = # In order to prevent excessive effort spent validating tokens, the # middleware caches previously-seen tokens for a configurable duration # (in seconds). Set to -1 to disable caching completely. (integer # value) #token_cache_time = 300 # Determines the frequency at which the list of revoked tokens is # retrieved from the Identity service (in seconds). A high number of # revocation events combined with a low cache duration may # significantly reduce performance. 
(integer value) #revocation_cache_time = 10 # (Optional) If defined, indicate whether token data should be # authenticated or authenticated and encrypted. If MAC, token data is # authenticated (with HMAC) in the cache. If ENCRYPT, token data is # encrypted and authenticated in the cache. If the value is not one of # these options or empty, auth_token will raise an exception on # initialization. (string value) # Allowed values: None, MAC, ENCRYPT #memcache_security_strategy = None # (Optional, mandatory if memcache_security_strategy is defined) This # string is used for key derivation. (string value) #memcache_secret_key = # (Optional) Number of seconds memcached server is considered dead # before it is tried again. (integer value) #memcache_pool_dead_retry = 300 # (Optional) Maximum total number of open connections to every # memcached server. (integer value) #memcache_pool_maxsize = 10 # (Optional) Socket timeout in seconds for communicating with a # memcached server. (integer value) #memcache_pool_socket_timeout = 3 # (Optional) Number of seconds a connection to memcached is held # unused in the pool before it is closed. (integer value) #memcache_pool_unused_timeout = 60 # (Optional) Number of seconds that an operation will wait to get a # memcached client connection from the pool. (integer value) #memcache_pool_conn_get_timeout = 10 # (Optional) Use the advanced (eventlet safe) memcached client pool. # The advanced pool will only work under python 2.x. (boolean value) #memcache_use_advanced_pool = false # (Optional) Indicate whether to set the X-Service-Catalog header. If # False, middleware will not ask for service catalog on token # validation and will not set the X-Service-Catalog header. (boolean # value) #include_service_catalog = true # Used to control the use and type of token binding. Can be set to: # "disabled" to not check token binding. 
"permissive" (default) to # validate binding information if the bind type is of a form known to # the server and ignore it if not. "strict" like "permissive" but if # the bind type is unknown the token will be rejected. "required" any # form of token binding is needed to be allowed. Finally the name of a # binding method that must be present in tokens. (string value) #enforce_token_bind = permissive # If true, the revocation list will be checked for cached tokens. This # requires that PKI tokens are configured on the identity server. # (boolean value) #check_revocations_for_cached = false # Hash algorithms to use for hashing PKI tokens. This may be a single # algorithm or multiple. The algorithms are those supported by Python # standard hashlib.new(). The hashes will be tried in the order given, # so put the preferred one first for performance. The result of the # first hash will be stored in the cache. This will typically be set # to multiple values only while migrating from a less secure algorithm # to a more secure one. Once all the old tokens are expired this # option should be set to a single value for better performance. (list # value) #hash_algorithms = md5 # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = # Config Section from which to load plugin specific options (unknown # value) #auth_section = [paste_deploy] # # From glance.glare # # Partial name of a pipeline in your paste configuration file with the # service name removed. For example, if your paste section name is # [pipeline:glance-api-keystone] use the value "keystone" (string # value) #flavor = # Name of the paste configuration file. 
(string value) #config_file = glance-12.0.0/etc/glance-scrubber.conf0000664000567000056710000004636412701407047020663 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From glance.scrubber # # Whether to allow users to specify image properties beyond what the # image schema provides (boolean value) #allow_additional_image_properties = true # Maximum number of image members per image. Negative values evaluate # to unlimited. (integer value) #image_member_quota = 128 # Maximum number of properties allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_property_quota = 128 # Maximum number of tags allowed on an image. Negative values evaluate # to unlimited. (integer value) #image_tag_quota = 128 # Maximum number of locations allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_location_quota = 10 # Python module path of data access API (string value) #data_api = glance.db.sqlalchemy.api # Default value for the number of items returned by a request if not # specified explicitly in the request (integer value) #limit_param_default = 25 # Maximum permissible number of items that could be returned by a # request (integer value) #api_limit_max = 1000 # Whether to include the backend image storage location in image # properties. Revealing storage location can be a security risk, so # use this setting with caution! (boolean value) #show_image_direct_url = false # Whether to include the backend image locations in image properties. # For example, if using the file system store a URL of # "file:///path/to/image" will be returned to the user in the # 'direct_url' meta-data field. Revealing storage location can be a # security risk, so use this setting with caution! Setting this to # true overrides the show_image_direct_url option. (boolean value) #show_multiple_locations = false # Maximum size of image a user can upload in bytes. 
Defaults to # 1099511627776 bytes (1 TB).WARNING: this value should only be # increased after careful consideration and must be set to a value # under 8 EB (9223372036854775808). (integer value) # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # Set a system wide quota for every user. This value is the total # capacity that a user can use across all storage systems. A value of # 0 means unlimited.Optional unit can be specified for the value. # Accepted units are B, KB, MB, GB and TB representing Bytes, # KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no # unit is specified then Bytes is assumed. Note that there should not # be any space between value and unit and units are case sensitive. # (string value) #user_storage_quota = 0 # Deploy the v1 OpenStack Images API. (boolean value) #enable_v1_api = true # Deploy the v2 OpenStack Images API. (boolean value) #enable_v2_api = true # Deploy the v1 OpenStack Registry API. (boolean value) #enable_v1_registry = true # Deploy the v2 OpenStack Registry API. (boolean value) #enable_v2_registry = true # The hostname/IP of the pydev process listening for debug connections # (string value) #pydev_worker_debug_host = # The port on which a pydev process is listening for connections. # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # AES key for encrypting store 'location' metadata. This includes, if # used, Swift or S3 credentials. Should be set to a random string of # length 16, 24 or 32 bytes (string value) #metadata_encryption_key = # Digest algorithm which will be used for digital signature. Use the # command "openssl list-message-digest-algorithms" to get the # available algorithms supported by the version of OpenSSL on the # platform. Examples are "sha1", "sha256", "sha512", etc. (string # value) #digest_algorithm = sha256 # The amount of time in seconds to delay before performing a delete. 
# (integer value) #scrub_time = 0 # The size of thread pool to be used for scrubbing images. The default # is one, which signifies serial scrubbing. Any value above one # indicates the max number of images that may be scrubbed in parallel. # (integer value) #scrub_pool_size = 1 # Turn on/off delayed delete. (boolean value) #delayed_delete = false # Role used to identify an authenticated user as administrator. # (string value) #admin_role = admin # Whether to pass through headers containing user and tenant # information when making requests to the registry. This allows the # registry to use the context middleware without keystonemiddleware's # auth_token middleware, removing calls to the keystone auth service. # It is recommended that when using this option, secure communication # between glance api and glance registry is ensured by means other # than auth_token middleware. (boolean value) #send_identity_headers = false # Loop time between checking for new items to schedule for delete. # (integer value) #wakeup_time = 300 # Run as a long-running process. When not specified (the default) run # the scrub operation once and then exits. When specified do not exit # and run scrub on wakeup_time interval as specified in the config. # (boolean value) #daemon = false # The protocol to use for communication with the registry server. # Either http or https. (string value) #registry_client_protocol = http # The path to the key file to use in SSL connections to the registry # server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE # environment variable to a filepath of the key file (string value) #registry_client_key_file = # The path to the cert file to use in SSL connections to the registry # server, if any. 
Alternately, you may set the GLANCE_CLIENT_CERT_FILE # environment variable to a filepath of the CA cert file (string # value) #registry_client_cert_file = # The path to the certifying authority cert file to use in SSL # connections to the registry server, if any. Alternately, you may set # the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the # CA cert file. (string value) #registry_client_ca_file = # When using SSL in connections to the registry server, do not require # validation via a certifying authority. This is the registry's # equivalent of specifying --insecure on the command line using # glanceclient for the API. (boolean value) #registry_client_insecure = false # The period of time, in seconds, that the API server will wait for a # registry request to complete. A value of 0 implies no timeout. # (integer value) #registry_client_timeout = 600 # Whether to pass through the user token when making requests to the # registry. To prevent failures with token expiration during big files # upload, it is recommended to set this parameter to False.If # "use_user_token" is not in effect, then admin credentials can be # specified. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #use_user_token = true # The administrators user name. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. 
Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_user = # The administrators password. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_password = # The tenant name of the administrative user. If "use_user_token" is # not in effect, then admin tenant name can be specified. (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_tenant_name = # The URL to the keystone service. If "use_user_token" is not in # effect and using keystone auth, then URL of keystone can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_url = # The strategy to use for authentication. If "use_user_token" is not # in effect, then auth strategy can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. 
For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_strategy = noauth # The region for the authentication service. If "use_user_token" is # not in effect and using keystone auth, then region name can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_region = # Address to find the registry server. (string value) #registry_host = 0.0.0.0 # Port the registry server is listening on. (port value) # Minimum value: 0 # Maximum value: 65535 #registry_port = 9191 # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. 
# (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if # log_config_append is set. (boolean value) #use_stderr = true # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. 
(string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. # (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. # (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. 
# (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. To use whatever SQL # mode is set by the server configuration, set this to no value. # Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. 
# (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection # lost. (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. # (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false [oslo_concurrency] # # From oslo.concurrency # # Enables or disables inter-process locks. (boolean value) # Deprecated group/name - [DEFAULT]/disable_process_locking #disable_process_locking = false # Directory to use for lock files. For security, the specified # directory should only be writable by the user running the processes # that need locking. Defaults to environment variable OSLO_LOCK_PATH. # If external locks are used, a lock path must be set. (string value) # Deprecated group/name - [DEFAULT]/lock_path #lock_path = [oslo_policy] # # From oslo.policy # # The JSON file that defines policies. (string value) # Deprecated group/name - [DEFAULT]/policy_file #policy_file = policy.json # Default rule. 
Enforced when a requested rule is not found. (string # value) # Deprecated group/name - [DEFAULT]/policy_default_rule #policy_default_rule = default # Directories where policy configuration files are stored. They can be # relative to any directory in the search path defined by the # config_dir option, or absolute paths. The file defined by # policy_file must exist for these directories to be searched. # Missing or empty directories are ignored. (multi valued) # Deprecated group/name - [DEFAULT]/policy_dirs #policy_dirs = policy.d glance-12.0.0/etc/property-protections-policies.conf.sample0000664000567000056710000000224212701407047025130 0ustar jenkinsjenkins00000000000000# property-protections-policies.conf.sample # # This file is an example config file for when # property_protection_rule_format=policies is enabled. # # Specify regular expression for which properties will be protected in [] # For each section, specify CRUD permissions. You may refer to policies defined # in policy.json. # The property rules will be applied in the order specified. Once # a match is found the remaining property rules will not be applied. # # WARNING: # * If the reg ex specified below does not compile, then # the glance-api service fails to start. (Guide for reg ex python compiler # used: # http://docs.python.org/2/library/re.html#regular-expression-syntax) # * If an operation(create, read, update, delete) is not specified or misspelt # then the glance-api service fails to start. # So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! # # NOTE: Only one policy can be specified per action. If multiple policies are # specified, then the glance-api service fails to start. 
[^x_.*] create = default read = default update = default delete = default [.*] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin glance-12.0.0/etc/glance-registry-paste.ini0000664000567000056710000000232112701407047021651 0ustar jenkinsjenkins00000000000000# Use this pipeline for no auth - DEFAULT [pipeline:glance-registry] pipeline = healthcheck osprofiler unauthenticated-context registryapp # Use this pipeline for keystone auth [pipeline:glance-registry-keystone] pipeline = healthcheck osprofiler authtoken context registryapp # Use this pipeline for authZ only. This means that the registry will treat a # user as authenticated without making requests to keystone to reauthenticate # the user. [pipeline:glance-registry-trusted-auth] pipeline = healthcheck osprofiler context registryapp [app:registryapp] paste.app_factory = glance.registry.api:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/glance/healthcheck_disable [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory hmac_keys = SECRET_KEY #DEPRECATED enabled = yes #DEPRECATED glance-12.0.0/etc/ovf-metadata.json.sample0000664000567000056710000000017612701407047021470 0ustar jenkinsjenkins00000000000000{ "cim_pasd": [ "ProcessorArchitecture", "InstructionSet", "InstructionSetExtensionName" ] } glance-12.0.0/etc/glance-manage.conf0000664000567000056710000001763212701407047020300 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. 
(boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. # (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. 
This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if # log_config_append is set. (boolean value) #use_stderr = true # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. 
# (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. # (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. (boolean value) #fatal_deprecations = false [database] # # From oslo.db # # The file name to use with SQLite. (string value) # Deprecated group/name - [DEFAULT]/sqlite_db #sqlite_db = oslo.sqlite # If True, SQLite uses synchronous mode. (boolean value) # Deprecated group/name - [DEFAULT]/sqlite_synchronous #sqlite_synchronous = true # The back end to use for the database. (string value) # Deprecated group/name - [DEFAULT]/db_backend #backend = sqlalchemy # The SQLAlchemy connection string to use to connect to the database. # (string value) # Deprecated group/name - [DEFAULT]/sql_connection # Deprecated group/name - [DATABASE]/sql_connection # Deprecated group/name - [sql]/connection #connection = # The SQLAlchemy connection string to use to connect to the slave # database. (string value) #slave_connection = # The SQL mode to be used for MySQL sessions. This option, including # the default, overrides any server-set SQL mode. To use whatever SQL # mode is set by the server configuration, set this to no value. # Example: mysql_sql_mode= (string value) #mysql_sql_mode = TRADITIONAL # Timeout before idle SQL connections are reaped. (integer value) # Deprecated group/name - [DEFAULT]/sql_idle_timeout # Deprecated group/name - [DATABASE]/sql_idle_timeout # Deprecated group/name - [sql]/idle_timeout #idle_timeout = 3600 # Minimum number of SQL connections to keep open in a pool. (integer # value) # Deprecated group/name - [DEFAULT]/sql_min_pool_size # Deprecated group/name - [DATABASE]/sql_min_pool_size #min_pool_size = 1 # Maximum number of SQL connections to keep open in a pool. 
(integer # value) # Deprecated group/name - [DEFAULT]/sql_max_pool_size # Deprecated group/name - [DATABASE]/sql_max_pool_size #max_pool_size = # Maximum number of database connection retries during startup. Set to # -1 to specify an infinite retry count. (integer value) # Deprecated group/name - [DEFAULT]/sql_max_retries # Deprecated group/name - [DATABASE]/sql_max_retries #max_retries = 10 # Interval between retries of opening a SQL connection. (integer # value) # Deprecated group/name - [DEFAULT]/sql_retry_interval # Deprecated group/name - [DATABASE]/reconnect_interval #retry_interval = 10 # If set, use this value for max_overflow with SQLAlchemy. (integer # value) # Deprecated group/name - [DEFAULT]/sql_max_overflow # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow #max_overflow = 50 # Verbosity of SQL debugging information: 0=None, 100=Everything. # (integer value) # Deprecated group/name - [DEFAULT]/sql_connection_debug #connection_debug = 0 # Add Python stack traces to SQL as comment strings. (boolean value) # Deprecated group/name - [DEFAULT]/sql_connection_trace #connection_trace = false # If set, use this value for pool_timeout with SQLAlchemy. (integer # value) # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout #pool_timeout = # Enable the experimental use of database reconnect on connection # lost. (boolean value) #use_db_reconnect = false # Seconds between retries of a database transaction. (integer value) #db_retry_interval = 1 # If True, increases the interval between retries of a database # operation up to db_max_retry_interval. (boolean value) #db_inc_retry_interval = true # If db_inc_retry_interval is set, the maximum seconds between retries # of a database operation. (integer value) #db_max_retry_interval = 10 # Maximum retries in case of connection error or deadlock error before # error is raised. Set to -1 to specify an infinite retry count. 
# (integer value) #db_max_retries = 20 # # From oslo.db.concurrency # # Enable the experimental use of thread pooling for all DB API calls # (boolean value) # Deprecated group/name - [DEFAULT]/dbapi_use_tpool #use_tpool = false glance-12.0.0/etc/glance-swift.conf.sample0000664000567000056710000000113612701407047021454 0ustar jenkinsjenkins00000000000000# glance-swift.conf.sample # # This file is an example config file when # multiple swift accounts/backing stores are enabled. # # Specify the reference name in [] # For each section, specify the auth_address, user and key. # # WARNING: # * If any of auth_address, user or key is not specified, # the glance-api's swift store will fail to configure [ref1] user = tenant:user1 key = key1 auth_version = 2 auth_address = http://localhost:5000/v2.0 [ref2] user = project_name:user_name2 key = key2 user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://localhost:5000/v3 glance-12.0.0/etc/glance-cache.conf0000664000567000056710000003211312701407047020102 0ustar jenkinsjenkins00000000000000[DEFAULT] # # From glance.cache # # Whether to allow users to specify image properties beyond what the # image schema provides (boolean value) #allow_additional_image_properties = true # Maximum number of image members per image. Negative values evaluate # to unlimited. (integer value) #image_member_quota = 128 # Maximum number of properties allowed on an image. Negative values # evaluate to unlimited. (integer value) #image_property_quota = 128 # Maximum number of tags allowed on an image. Negative values evaluate # to unlimited. (integer value) #image_tag_quota = 128 # Maximum number of locations allowed on an image. Negative values # evaluate to unlimited. 
(integer value) #image_location_quota = 10 # Python module path of data access API (string value) #data_api = glance.db.sqlalchemy.api # Default value for the number of items returned by a request if not # specified explicitly in the request (integer value) #limit_param_default = 25 # Maximum permissible number of items that could be returned by a # request (integer value) #api_limit_max = 1000 # Whether to include the backend image storage location in image # properties. Revealing storage location can be a security risk, so # use this setting with caution! (boolean value) #show_image_direct_url = false # Whether to include the backend image locations in image properties. # For example, if using the file system store a URL of # "file:///path/to/image" will be returned to the user in the # 'direct_url' meta-data field. Revealing storage location can be a # security risk, so use this setting with caution! Setting this to # true overrides the show_image_direct_url option. (boolean value) #show_multiple_locations = false # Maximum size of image a user can upload in bytes. Defaults to # 1099511627776 bytes (1 TB).WARNING: this value should only be # increased after careful consideration and must be set to a value # under 8 EB (9223372036854775808). (integer value) # Maximum value: 9223372036854775808 #image_size_cap = 1099511627776 # Set a system wide quota for every user. This value is the total # capacity that a user can use across all storage systems. A value of # 0 means unlimited.Optional unit can be specified for the value. # Accepted units are B, KB, MB, GB and TB representing Bytes, # KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no # unit is specified then Bytes is assumed. Note that there should not # be any space between value and unit and units are case sensitive. # (string value) #user_storage_quota = 0 # Deploy the v1 OpenStack Images API. (boolean value) #enable_v1_api = true # Deploy the v2 OpenStack Images API. 
(boolean value) #enable_v2_api = true # Deploy the v1 OpenStack Registry API. (boolean value) #enable_v1_registry = true # Deploy the v2 OpenStack Registry API. (boolean value) #enable_v2_registry = true # The hostname/IP of the pydev process listening for debug connections # (string value) #pydev_worker_debug_host = # The port on which a pydev process is listening for connections. # (port value) # Minimum value: 0 # Maximum value: 65535 #pydev_worker_debug_port = 5678 # AES key for encrypting store 'location' metadata. This includes, if # used, Swift or S3 credentials. Should be set to a random string of # length 16, 24 or 32 bytes (string value) #metadata_encryption_key = # Digest algorithm which will be used for digital signature. Use the # command "openssl list-message-digest-algorithms" to get the # available algorithms supported by the version of OpenSSL on the # platform. Examples are "sha1", "sha256", "sha512", etc. (string # value) #digest_algorithm = sha256 # The path to the sqlite file database that will be used for image # cache management. (string value) #image_cache_sqlite_db = cache.db # The driver to use for image cache management. (string value) #image_cache_driver = sqlite # The upper limit (the maximum size of accumulated cache in bytes) # beyond which the cache pruner, if running, starts cleaning the image # cache. (integer value) #image_cache_max_size = 10737418240 # The amount of time to let an incomplete image remain in the cache, # before the cache cleaner, if running, will remove the incomplete # image. (integer value) #image_cache_stall_time = 86400 # Base directory that the image cache uses. (string value) #image_cache_dir = # Address to find the registry server. (string value) #registry_host = 0.0.0.0 # Port the registry server is listening on. (port value) # Minimum value: 0 # Maximum value: 65535 #registry_port = 9191 # Whether to pass through the user token when making requests to the # registry. 
To prevent failures with token expiration during big files # upload, it is recommended to set this parameter to False.If # "use_user_token" is not in effect, then admin credentials can be # specified. (boolean value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #use_user_token = true # The administrators user name. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_user = # The administrators password. If "use_user_token" is not in effect, # then admin credentials can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_password = # The tenant name of the administrative user. If "use_user_token" is # not in effect, then admin tenant name can be specified. (string # value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. 
Related functionality with uploading big images has # been implemented with Keystone trusts support. #admin_tenant_name = # The URL to the keystone service. If "use_user_token" is not in # effect and using keystone auth, then URL of keystone can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_url = # The strategy to use for authentication. If "use_user_token" is not # in effect, then auth strategy can be specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_strategy = noauth # The region for the authentication service. If "use_user_token" is # not in effect and using keystone auth, then region name can be # specified. (string value) # This option is deprecated for removal. # Its value may be silently ignored in the future. # Reason: This option was considered harmful and has been deprecated # in M release. It will be removed in O release. For more information # read OSSN-0060. Related functionality with uploading big images has # been implemented with Keystone trusts support. #auth_region = # # From oslo.log # # If set to true, the logging level will be set to DEBUG instead of # the default INFO level. (boolean value) #debug = false # If set to false, the logging level will be set to WARNING instead of # the default INFO level. (boolean value) # This option is deprecated for removal. 
# Its value may be silently ignored in the future. #verbose = true # The name of a logging configuration file. This file is appended to # any existing logging configuration files. For details about logging # configuration files, see the Python logging module documentation. # Note that when logging configuration files are used then all logging # configuration is set in the configuration file and other logging # configuration options are ignored (for example, # logging_context_format_string). (string value) # Deprecated group/name - [DEFAULT]/log_config #log_config_append = # Defines the format string for %%(asctime)s in log records. Default: # %(default)s . This option is ignored if log_config_append is set. # (string value) #log_date_format = %Y-%m-%d %H:%M:%S # (Optional) Name of log file to send logging output to. If no default # is set, logging will go to stderr as defined by use_stderr. This # option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logfile #log_file = # (Optional) The base directory used for relative log_file paths. # This option is ignored if log_config_append is set. (string value) # Deprecated group/name - [DEFAULT]/logdir #log_dir = # Uses logging handler designed to watch file system. When log file is # moved or removed this handler will open a new log file with # specified path instantaneously. It makes sense only if log_file # option is specified and Linux platform is used. This option is # ignored if log_config_append is set. (boolean value) #watch_log_file = false # Use syslog for logging. Existing syslog format is DEPRECATED and # will be changed later to honor RFC5424. This option is ignored if # log_config_append is set. (boolean value) #use_syslog = false # Syslog facility to receive log lines. This option is ignored if # log_config_append is set. (string value) #syslog_log_facility = LOG_USER # Log output to standard error. This option is ignored if # log_config_append is set. 
(boolean value) #use_stderr = true # Format string to use for log messages with context. (string value) #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s # Format string to use for log messages when context is undefined. # (string value) #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # Additional data to append to log message when logging level for the # message is DEBUG. (string value) #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d # Prefix each line of exception output with this format. (string # value) #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s # Defines the format string for %(user_identity)s that is used in # logging_context_format_string. (string value) #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s # List of package logging levels in logger=LEVEL pairs. This option is # ignored if log_config_append is set. (list value) #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO # Enables or disables publication of error events. (boolean value) #publish_errors = false # The format for an instance that is passed with the log message. # (string value) #instance_format = "[instance: %(uuid)s] " # The format for an instance UUID that is passed with the log message. # (string value) #instance_uuid_format = "[instance: %(uuid)s] " # Enables or disables fatal status of deprecations. 
(boolean value) #fatal_deprecations = false [oslo_policy] # # From oslo.policy # # The JSON file that defines policies. (string value) # Deprecated group/name - [DEFAULT]/policy_file #policy_file = policy.json # Default rule. Enforced when a requested rule is not found. (string # value) # Deprecated group/name - [DEFAULT]/policy_default_rule #policy_default_rule = default # Directories where policy configuration files are stored. They can be # relative to any directory in the search path defined by the # config_dir option, or absolute paths. The file defined by # policy_file must exist for these directories to be searched. # Missing or empty directories are ignored. (multi valued) # Deprecated group/name - [DEFAULT]/policy_dirs #policy_dirs = policy.d glance-12.0.0/etc/schema-image.json0000664000567000056710000000254412701407047020161 0ustar jenkinsjenkins00000000000000{ "kernel_id": { "type": ["null", "string"], "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." }, "ramdisk_id": { "type": ["null", "string"], "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." }, "instance_uuid": { "type": "string", "description": "Metadata which can be used to record which instance this image is associated with. 
(Informational only, does not create an instance snapshot.)" }, "architecture": { "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", "type": "string" }, "os_distro": { "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", "type": "string" }, "os_version": { "description": "Operating system version as specified by the distributor", "type": "string" } } glance-12.0.0/glance/0000775000567000056710000000000012701407204015412 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/glare/0000775000567000056710000000000012701407204016504 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/glare/dependency.py0000664000567000056710000001203212701407047021177 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance.common.exception as exc import glance.common.glare.definitions as definitions from glance.glare.domain import proxy from glance.i18n import _ class ArtifactProxy(proxy.Artifact): def __init__(self, artifact, repo): super(ArtifactProxy, self).__init__(artifact) self.artifact = artifact self.repo = repo def set_type_specific_property(self, prop_name, value): if prop_name not in self.metadata.attributes.dependencies: return super(ArtifactProxy, self).set_type_specific_property( prop_name, value) # for every dependency have to transfer dep_id into a dependency itself if value is None: setattr(self.artifact, prop_name, None) else: if not isinstance(value, list): setattr(self.artifact, prop_name, self._fetch_dependency(value)) else: setattr(self.artifact, prop_name, [self._fetch_dependency(dep_id) for dep_id in value]) def _fetch_dependency(self, dep_id): # check for circular dependency id -> id if self.id == dep_id: raise exc.ArtifactCircularDependency() art = self.repo.get(artifact_id=dep_id) # repo returns a proxy of some level. 
# Need to find the base declarative artifact while not isinstance(art, definitions.ArtifactType): art = art.base return art class ArtifactRepo(proxy.ArtifactRepo): def __init__(self, repo, plugins, item_proxy_class=None, item_proxy_kwargs=None): self.plugins = plugins super(ArtifactRepo, self).__init__(repo, item_proxy_class=ArtifactProxy, item_proxy_kwargs={'repo': self}) def _check_dep_state(self, dep, state): """Raises an exception if dependency 'dep' is not in state 'state'""" if dep.state != state: raise exc.Invalid(_( "Not all dependencies are in '%s' state") % state) def publish(self, artifact, *args, **kwargs): """ Creates transitive dependencies, checks that all dependencies are in active state and transfers artifact from creating to active state """ # make sure that all required dependencies exist artifact.__pre_publish__(*args, **kwargs) # make sure that all dependencies are active for param in artifact.metadata.attributes.dependencies: dependency = getattr(artifact, param) if isinstance(dependency, list): for dep in dependency: self._check_dep_state(dep, 'active') elif dependency: self._check_dep_state(dependency, 'active') # as state is changed on db save, have to retrieve the freshly changed # artifact (the one passed into the func will have old state value) artifact = self.base.publish(self.helper.unproxy(artifact)) return self.helper.proxy(artifact) def remove(self, artifact): """ Checks that artifact has no dependencies and removes it. 
Otherwise an exception is raised """ for param in artifact.metadata.attributes.dependencies: if getattr(artifact, param): raise exc.Invalid(_( "Dependency property '%s' has to be deleted first") % param) return self.base.remove(self.helper.unproxy(artifact)) class ArtifactFactory(proxy.ArtifactFactory): def __init__(self, base, klass, repo): self.klass = klass self.repo = repo super(ArtifactFactory, self).__init__( base, artifact_proxy_class=ArtifactProxy, artifact_proxy_kwargs={'repo': self.repo}) def new_artifact(self, *args, **kwargs): """ Creates an artifact without dependencies first and then adds them to the newly created artifact """ # filter dependencies no_deps = {p: kwargs[p] for p in kwargs if p not in self.klass.metadata.attributes.dependencies} deps = {p: kwargs[p] for p in kwargs if p in self.klass.metadata.attributes.dependencies} artifact = super(ArtifactFactory, self).new_artifact(*args, **no_deps) # now set dependencies for dep_param, dep_value in deps.items(): setattr(artifact, dep_param, dep_value) return artifact glance-12.0.0/glance/glare/domain/0000775000567000056710000000000012701407204017753 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/glare/domain/__init__.py0000664000567000056710000000527512701407047022102 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from glance.common import timeutils from glance.i18n import _ class Artifact(object): def __init__(self, id, name, version, type_name, type_version, visibility, state, owner, created_at=None, updated_at=None, **kwargs): self.id = id self.name = name self.type_name = type_name self.version = version self.type_version = type_version self.visibility = visibility self.state = state self.owner = owner self.created_at = created_at self.updated_at = updated_at self.description = kwargs.pop('description', None) self.blobs = kwargs.pop('blobs', {}) self.properties = kwargs.pop('properties', {}) self.dependencies = kwargs.pop('dependencies', {}) self.tags = kwargs.pop('tags', []) if kwargs: message = _("__init__() got unexpected keyword argument '%s'") raise TypeError(message % list(kwargs.keys())[0]) class ArtifactFactory(object): def __init__(self, context, klass): self.klass = klass self.context = context def new_artifact(self, name, version, **kwargs): id = kwargs.pop('id', str(uuid.uuid4())) tags = kwargs.pop('tags', []) # pop reserved fields from kwargs dict for param in ['owner', 'created_at', 'updated_at', 'deleted_at', 'visibility', 'state']: kwargs.pop(param, '') curr_timestamp = timeutils.utcnow() base = self.klass(id=id, name=name, version=version, visibility='private', state='creating', # XXX FIXME remove after using authentication # paste-flavor # (no or '' as owner will always be there) owner=self.context.owner or '', created_at=curr_timestamp, updated_at=curr_timestamp, tags=tags, **kwargs) return base glance-12.0.0/glance/glare/domain/proxy.py0000664000567000056710000001510012701407047021510 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from glance.common import exception as exc from glance.domain import proxy as image_proxy def _proxy_artifact_property(attr): def getter(self): return self.get_type_specific_property(attr) def setter(self, value): return self.set_type_specific_property(attr, value) return property(getter, setter) class ArtifactHelper(image_proxy.Helper): """ Artifact-friendly proxy helper: does all the same as regular helper but also dynamically proxies all the type-specific attributes, including properties, blobs and dependencies """ def proxy(self, obj): if obj is None or self.proxy_class is None: return obj if not hasattr(obj, 'metadata'): return super(ArtifactHelper, self).proxy(obj) extra_attrs = {} for att_name in obj.metadata.attributes.all.keys(): extra_attrs[att_name] = _proxy_artifact_property(att_name) new_proxy_class = type("%s(%s)" % (obj.metadata.type_name, self.proxy_class.__module__), (self.proxy_class,), extra_attrs) return new_proxy_class(obj, **self.proxy_kwargs) class ArtifactRepo(object): def __init__(self, base, proxy_helper=None, item_proxy_class=None, item_proxy_kwargs=None): self.base = base if proxy_helper is None: proxy_helper = ArtifactHelper(item_proxy_class, item_proxy_kwargs) self.helper = proxy_helper def get(self, *args, **kwargs): return self.helper.proxy(self.base.get(*args, **kwargs)) def list(self, *args, **kwargs): items = self.base.list(*args, **kwargs) return [self.helper.proxy(item) for item in items] def add(self, item): base_item = self.helper.unproxy(item) result = self.base.add(base_item) return 
self.helper.proxy(result) def save(self, item): base_item = self.helper.unproxy(item) result = self.base.save(base_item) return self.helper.proxy(result) def remove(self, item): base_item = self.helper.unproxy(item) result = self.base.remove(base_item) return self.helper.proxy(result) def publish(self, item, *args, **kwargs): base_item = self.helper.unproxy(item) result = self.base.publish(base_item, *args, **kwargs) return self.helper.proxy(result) class Artifact(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.base = base self.helper = ArtifactHelper(proxy_class, proxy_kwargs) # it is enough to proxy metadata only, other properties will be proxied # automatically by ArtifactHelper metadata = _proxy_artifact_property('metadata') def set_type_specific_property(self, prop_name, value): setattr(self.base, prop_name, value) def get_type_specific_property(self, prop_name): try: return getattr(self.base, prop_name) except AttributeError: raise exc.ArtifactInvalidProperty(prop=prop_name) def __pre_publish__(self, *args, **kwargs): self.base.__pre_publish__(*args, **kwargs) class ArtifactFactory(object): def __init__(self, base, artifact_proxy_class=Artifact, artifact_proxy_kwargs=None): self.artifact_helper = ArtifactHelper(artifact_proxy_class, artifact_proxy_kwargs) self.base = base def new_artifact(self, *args, **kwargs): t = self.base.new_artifact(*args, **kwargs) return self.artifact_helper.proxy(t) class ArtifactBlob(object): def __init__(self, base, artifact_blob_proxy_class=None, artifact_blob_proxy_kwargs=None): self.base = base self.helper = image_proxy.Helper(artifact_blob_proxy_class, artifact_blob_proxy_kwargs) size = _proxy_artifact_property('size') locations = _proxy_artifact_property('locations') checksum = _proxy_artifact_property('checksum') item_key = _proxy_artifact_property('item_key') def set_type_specific_property(self, prop_name, value): setattr(self.base, prop_name, value) def get_type_specific_property(self, 
prop_name): return getattr(self.base, prop_name) def to_dict(self): return self.base.to_dict() class ArtifactProperty(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.base = base self.helper = ArtifactHelper(proxy_class, proxy_kwargs) def set_type_specific_property(self, prop_name, value): setattr(self.base, prop_name, value) def get_type_specific_property(self, prop_name): return getattr(self.base, prop_name) class List(collections.MutableSequence): def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): self.base = base self.helper = image_proxy.Helper(item_proxy_class, item_proxy_kwargs) def __len__(self): return len(self.base) def __delitem__(self, index): del self.base[index] def __getitem__(self, index): item = self.base[index] return self.helper.proxy(item) def insert(self, index, value): self.base.insert(index, self.helper.unproxy(value)) def __setitem__(self, index, value): self.base[index] = self.helper.unproxy(value) class Dict(collections.MutableMapping): def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): self.base = base self.helper = image_proxy.Helper(item_proxy_class, item_proxy_kwargs) def __setitem__(self, key, value): self.base[key] = self.helper.unproxy(value) def __getitem__(self, key): item = self.base[key] return self.helper.proxy(item) def __delitem__(self, key): del self.base[key] def __len__(self): return len(self.base) def __iter__(self): for key in self.base.keys(): yield key glance-12.0.0/glance/glare/__init__.py0000664000567000056710000000302212701407047020617 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from glance.common import exception class Showlevel(object): # None - do not show additional properties and blobs with locations; # Basic - show all artifact fields except dependencies; # Direct - show all artifact fields with only direct dependencies; # Transitive - show all artifact fields with all of dependencies. NONE = 0 BASIC = 1 DIRECT = 2 TRANSITIVE = 3 _level_map = {'none': NONE, 'basic': BASIC, 'direct': DIRECT, 'transitive': TRANSITIVE} _inverted_level_map = {v: k for k, v in six.iteritems(_level_map)} @staticmethod def to_str(n): try: return Showlevel._inverted_level_map[n] except KeyError: raise exception.ArtifactUnsupportedShowLevel() @staticmethod def from_str(str_value): try: return Showlevel._level_map[str_value] except KeyError: raise exception.ArtifactUnsupportedShowLevel() glance-12.0.0/glance/glare/gateway.py0000664000567000056710000000453412701407047020532 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance_store from glance.common import store_utils import glance.db from glance.glare import dependency from glance.glare import domain from glance.glare import location from glance.glare import updater class Gateway(object): def __init__(self, db_api=None, store_api=None, plugins=None): self.db_api = db_api or glance.db.get_api() self.store_api = store_api or glance_store self.store_utils = store_utils self.plugins = plugins def get_artifact_type_factory(self, context, klass): declarative_factory = domain.ArtifactFactory(context, klass) repo = self.get_artifact_repo(context) dependencies_factory = dependency.ArtifactFactory(declarative_factory, klass, repo) factory = location.ArtifactFactoryProxy(dependencies_factory, context, self.store_api, self.store_utils) updater_factory = updater.ArtifactFactoryProxy(factory) return updater_factory def get_artifact_repo(self, context): artifact_repo = glance.db.ArtifactRepo(context, self.db_api, self.plugins) dependencies_repo = dependency.ArtifactRepo(artifact_repo, self.plugins) repo = location.ArtifactRepoProxy(dependencies_repo, context, self.store_api, self.store_utils) updater_repo = updater.ArtifactRepoProxy(repo) return updater_repo glance-12.0.0/glance/glare/location.py0000664000567000056710000001713512701407047020702 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from glance.common.glare import definitions from glance.common import utils from glance.glare.domain import proxy from glance.i18n import _, _LE, _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) class ArtifactFactoryProxy(proxy.ArtifactFactory): def __init__(self, factory, context, store_api, store_utils): self.context = context self.store_api = store_api self.store_utils = store_utils proxy_kwargs = {'store_api': store_api, 'store_utils': store_utils, 'context': self.context} super(ArtifactFactoryProxy, self).__init__( factory, artifact_proxy_class=ArtifactProxy, artifact_proxy_kwargs=proxy_kwargs) class ArtifactProxy(proxy.Artifact): def __init__(self, artifact, context, store_api, store_utils): self.artifact = artifact self.context = context self.store_api = store_api self.store_utils = store_utils super(ArtifactProxy, self).__init__(artifact, proxy_class=ArtifactBlobProxy, proxy_kwargs={"context": self.context, "store_api": self.store_api}) def set_type_specific_property(self, prop_name, value): if prop_name not in self.artifact.metadata.attributes.blobs: super(ArtifactProxy, self).set_type_specific_property(prop_name, value) return item_key = "%s.%s" % (self.artifact.id, prop_name) # XXX FIXME have to add support for BinaryObjectList properties blob = definitions.Blob(item_key=item_key) blob_proxy = self.helper.proxy(blob) if value is None: for location in blob_proxy.locations: blob_proxy.delete_from_store(location) else: data = value[0] size = value[1] blob_proxy.upload_to_store(data, size) setattr(self.artifact, prop_name, blob) def get_type_specific_property(self, prop_name): base = super(ArtifactProxy, self).get_type_specific_property(prop_name) if base is None: return None if prop_name in self.artifact.metadata.attributes.blobs: if isinstance(self.artifact.metadata.attributes.blobs[prop_name], list): return 
ArtifactBlobProxyList(self.artifact.id, prop_name, base, self.context, self.store_api) else: return self.helper.proxy(base) else: return base class ArtifactRepoProxy(proxy.ArtifactRepo): def __init__(self, artifact_repo, context, store_api, store_utils): self.context = context self.store_api = store_api proxy_kwargs = {'context': context, 'store_api': store_api, 'store_utils': store_utils} super(ArtifactRepoProxy, self).__init__( artifact_repo, proxy_helper=proxy.ArtifactHelper(ArtifactProxy, proxy_kwargs)) def get(self, *args, **kwargs): return self.helper.proxy(self.base.get(*args, **kwargs)) class ArtifactBlobProxy(proxy.ArtifactBlob): def __init__(self, blob, context, store_api): self.context = context self.store_api = store_api self.blob = blob super(ArtifactBlobProxy, self).__init__(blob) def delete_from_store(self, location): try: ret = self.store_api.delete_from_backend(location['value'], context=self.context) location['status'] = 'deleted' return ret except self.store_api.NotFound: msg = _LW('Failed to delete blob' ' %s in store from URI') % self.blob.id LOG.warn(msg) except self.store_api.StoreDeleteNotSupported as e: LOG.warn(encodeutils.exception_to_unicode(e)) except self.store_api.UnsupportedBackend: exc_type = sys.exc_info()[0].__name__ msg = (_LE('Failed to delete blob' ' %(blob_id)s from store: %(exc)s') % dict(blob_id=self.blob.id, exc=exc_type)) LOG.error(msg) def upload_to_store(self, data, size): if size is None: # NOTE(ativelkov): None is "unknown size" size = 0 location, ret_size, checksum, loc_meta = self.store_api.add_to_backend( CONF, self.blob.item_key, utils.LimitingReader(utils.CooperativeReader(data), CONF.image_size_cap), size, context=self.context) self.blob.size = ret_size self.blob.locations = [{'status': 'active', 'value': location}] self.blob.checksum = checksum @property def data_stream(self): if len(self.locations) > 0: err = None try: for location in self.locations: data, size = self.store_api.get_from_backend( 
location['value'], context=self.context) return data except Exception as e: LOG.warn(_('Get blob %(name)s data failed: ' '%(err)s.') % {'name': self.blob.item_key, 'err': encodeutils.exception_to_unicode(e)}) err = e # tried all locations LOG.error(_LE('Glance tried all active locations to get data ' 'for blob %s ' 'but all have failed.') % self.blob.item_key) raise err class ArtifactBlobProxyList(proxy.List): def __init__(self, artifact_id, prop_name, bloblist, context, store_api): self.artifact_id = artifact_id self.prop_name = prop_name self.context = context self.store_api = store_api super(ArtifactBlobProxyList, self).__init__(bloblist, item_proxy_class=ArtifactBlobProxy, item_proxy_kwargs={'context': context, 'store_api': store_api}) def insert(self, index, value): data = value[0] size = value[1] item_key = "%s.%s.%s" % (self.artifact_id, self.prop_name, uuid.uuid4()) blob = definitions.Blob(item_key=item_key) blob_proxy = self.helper.proxy(blob) blob_proxy.upload_to_store(data, size) super(ArtifactBlobProxyList, self).insert(index, blob_proxy) def __setitem__(self, index, value): blob = self[index] data = value[0] size = value[1] blob.upload_to_store(data, size) glance-12.0.0/glance/glare/updater.py0000664000567000056710000001674412701407047020543 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import exception as exc from glance.glare.domain import proxy from glance.i18n import _ class ArtifactProxy(proxy.Artifact): """A proxy that is capable of modifying an artifact via jsonpatch methods. Currently supported methods are update, remove, replace. """ def __init__(self, artifact): self.artifact = artifact super(ArtifactProxy, self).__init__(artifact) def __getattr__(self, name): if not hasattr(self, name): raise exc.ArtifactInvalidProperty(prop=name) return super(ArtifactProxy, self).__getattr__(name) def _perform_op(self, op, **kwargs): path = kwargs.get("path") value = kwargs.get("value") prop_name, delimiter, path_left = path.lstrip('/').partition('/') super(ArtifactProxy, self).get_type_specific_property(prop_name) if not path_left: return setattr(self, prop_name, value) try: prop = self._get_prop_to_update(prop_name, path_left) # correct path_left and call corresponding update method kwargs["path"] = path_left getattr(prop, op)(path=kwargs["path"], value=kwargs.get("value")) return setattr(self, prop_name, prop) except exc.InvalidJsonPatchPath: # NOTE(ivasilevskaya): here exception is reraised with # 'part of path' substituted with with 'full path' to form a # more relevant message raise exc.InvalidJsonPatchPath( path=path, explanation=_("No property to access")) def _get_prop_to_update(self, prop_name, path): """Proxies properties that can be modified via update request. All properties can be updated save for 'metadata' and blobs. Due to the fact that empty lists and dicts are represented with null values, have to check precise type definition by consulting metadata. 
""" prop = super(ArtifactProxy, self).get_type_specific_property( prop_name) if (prop_name == "metadata" or prop_name in self.artifact.metadata.attributes.blobs): return prop if not prop: # get correct type for empty list/dict klass = self.artifact.metadata.attributes.all[prop_name] if isinstance(klass, list): prop = [] elif isinstance(klass, dict): prop = {} return wrap_property(prop, path) def replace(self, path, value): self._perform_op("replace", path=path, value=value) def remove(self, path, value=None): self._perform_op("remove", path=path) def add(self, path, value): self._perform_op("add", path=path, value=value) class ArtifactFactoryProxy(proxy.ArtifactFactory): def __init__(self, factory): super(ArtifactFactoryProxy, self).__init__(factory) class ArtifactRepoProxy(proxy.ArtifactRepo): def __init__(self, repo): super(ArtifactRepoProxy, self).__init__( repo, item_proxy_class=ArtifactProxy) def wrap_property(prop_value, full_path): if isinstance(prop_value, list): return ArtifactListPropertyProxy(prop_value, full_path) if isinstance(prop_value, dict): return ArtifactDictPropertyProxy(prop_value, full_path) # no other types are supported raise exc.InvalidJsonPatchPath(path=full_path) class ArtifactListPropertyProxy(proxy.List): """A class to wrap a list property. Makes possible to modify the property value via supported jsonpatch requests (update/remove/replace). """ def __init__(self, prop_value, path): super(ArtifactListPropertyProxy, self).__init__( prop_value) def _proc_key(self, idx_str, should_exist=True): """JsonPatchUpdateMixin method overload. Only integers less than current array length and '-' (last elem) in path are allowed. Raises an InvalidJsonPatchPath exception if any of the conditions above are not met. 
""" if idx_str == '-': return len(self) - 1 try: idx = int(idx_str) if not should_exist and len(self) == 0: return 0 if len(self) < idx + 1: msg = _("Array has no element at position %d") % idx raise exc.InvalidJsonPatchPath(explanation=msg, path=idx) return idx except (ValueError, TypeError): msg = _("Not an array idx '%s'") % idx_str raise exc.InvalidJsonPatchPath(explanation=msg, path=idx_str) def add(self, path, value): # by now arrays can't contain complex structures (due to Declarative # Framework limitations and DB storage model), # so will 'path' == idx equality is implied. idx = self._proc_key(path, False) if idx == len(self) - 1: self.append(value) else: self.insert(idx, value) return self.base def remove(self, path, value=None): # by now arrays can't contain complex structures, so will imply that # 'path' == idx [see comment for add()] del self[self._proc_key(path)] return self.base def replace(self, path, value): # by now arrays can't contain complex structures, so will imply that # 'path' == idx [see comment for add()] self[self._proc_key(path)] = value return self.base class ArtifactDictPropertyProxy(proxy.Dict): """A class to wrap a dict property. Makes possible to modify the property value via supported jsonpatch requests (update/remove/replace). 
""" def __init__(self, prop_value, path): super(ArtifactDictPropertyProxy, self).__init__( prop_value) def _proc_key(self, key_str, should_exist=True): """JsonPatchUpdateMixin method overload""" if should_exist and key_str not in self.keys(): msg = _("No such key '%s' in a dict") % key_str raise exc.InvalidJsonPatchPath(path=key_str, explanation=msg) return key_str def replace(self, path, value): start, delimiter, rest = path.partition('/') # the full path MUST exist in replace operation, so let's check # that such key exists key = self._proc_key(start) if not rest: self[key] = value else: prop = wrap_property(self[key], rest) self[key] = prop.replace(rest, value) def remove(self, path, value=None): start, delimiter, rest = path.partition('/') key = self._proc_key(start) if not rest: del self[key] else: prop = wrap_property(self[key], rest) prop.remove(rest) def add(self, path, value): start, delimiter, rest = path.partition('/') if not rest: self[start] = value else: key = self._proc_key(start) prop = wrap_property(self[key], rest) self[key] = prop.add(rest, value) glance-12.0.0/glance/quota/0000775000567000056710000000000012701407204016543 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/quota/__init__.py0000664000567000056710000003342712701407047020672 0ustar jenkinsjenkins00000000000000# Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import glance_store as store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import glance.api.common import glance.common.exception as exception from glance.common import utils import glance.domain import glance.domain.proxy from glance.i18n import _, _LI LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('image_member_quota', 'glance.common.config') CONF.import_opt('image_property_quota', 'glance.common.config') CONF.import_opt('image_tag_quota', 'glance.common.config') def _enforce_image_tag_quota(tags): if CONF.image_tag_quota < 0: # If value is negative, allow unlimited number of tags return if not tags: return if len(tags) > CONF.image_tag_quota: raise exception.ImageTagLimitExceeded(attempted=len(tags), maximum=CONF.image_tag_quota) def _calc_required_size(context, image, locations): required_size = None if image.size: required_size = image.size * len(locations) else: for location in locations: size_from_backend = None try: size_from_backend = store.get_size_from_backend( location['url'], context=context) except (store.UnknownScheme, store.NotFound): pass except store.BadStoreUri: raise exception.BadStoreUri if size_from_backend: required_size = size_from_backend * len(locations) break return required_size def _enforce_image_location_quota(image, locations, is_setter=False): if CONF.image_location_quota < 0: # If value is negative, allow unlimited number of locations return attempted = len(image.locations) + len(locations) attempted = attempted if not is_setter else len(locations) maximum = CONF.image_location_quota if attempted > maximum: raise exception.ImageLocationLimitExceeded(attempted=attempted, maximum=maximum) class ImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context, db_api, store_utils): self.image_repo = image_repo self.db_api = db_api proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': 
store_utils} super(ImageRepoProxy, self).__init__(image_repo, item_proxy_class=ImageProxy, item_proxy_kwargs=proxy_kwargs) def _enforce_image_property_quota(self, attempted): if CONF.image_property_quota < 0: # If value is negative, allow unlimited number of properties return maximum = CONF.image_property_quota if attempted > maximum: kwargs = {'attempted': attempted, 'maximum': maximum} exc = exception.ImagePropertyLimitExceeded(**kwargs) LOG.debug(encodeutils.exception_to_unicode(exc)) raise exc def save(self, image, from_state=None): if image.added_new_properties(): self._enforce_image_property_quota(len(image.extra_properties)) return super(ImageRepoProxy, self).save(image, from_state=from_state) def add(self, image): self._enforce_image_property_quota(len(image.extra_properties)) return super(ImageRepoProxy, self).add(image) class ImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, factory, context, db_api, store_utils): proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': store_utils} super(ImageFactoryProxy, self).__init__(factory, proxy_class=ImageProxy, proxy_kwargs=proxy_kwargs) def new_image(self, **kwargs): tags = kwargs.pop('tags', set([])) _enforce_image_tag_quota(tags) return super(ImageFactoryProxy, self).new_image(tags=tags, **kwargs) class QuotaImageTagsProxy(object): def __init__(self, orig_set): if orig_set is None: orig_set = set([]) self.tags = orig_set def add(self, item): self.tags.add(item) _enforce_image_tag_quota(self.tags) def __cast__(self, *args, **kwargs): return self.tags.__cast__(*args, **kwargs) def __contains__(self, *args, **kwargs): return self.tags.__contains__(*args, **kwargs) def __eq__(self, other): return self.tags == other def __iter__(self, *args, **kwargs): return self.tags.__iter__(*args, **kwargs) def __len__(self, *args, **kwargs): return self.tags.__len__(*args, **kwargs) def __getattr__(self, name): return getattr(self.tags, name) class 
ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): def __init__(self, member_factory, context, db_api, store_utils): self.db_api = db_api self.context = context proxy_kwargs = {'context': context, 'db_api': db_api, 'store_utils': store_utils} super(ImageMemberFactoryProxy, self).__init__( member_factory, proxy_class=ImageMemberProxy, proxy_kwargs=proxy_kwargs) def _enforce_image_member_quota(self, image): if CONF.image_member_quota < 0: # If value is negative, allow unlimited number of members return current_member_count = self.db_api.image_member_count(self.context, image.image_id) attempted = current_member_count + 1 maximum = CONF.image_member_quota if attempted > maximum: raise exception.ImageMemberLimitExceeded(attempted=attempted, maximum=maximum) def new_image_member(self, image, member_id): self._enforce_image_member_quota(image) return super(ImageMemberFactoryProxy, self).new_image_member(image, member_id) class QuotaImageLocationsProxy(object): def __init__(self, image, context, db_api): self.image = image self.context = context self.db_api = db_api self.locations = image.locations def __cast__(self, *args, **kwargs): return self.locations.__cast__(*args, **kwargs) def __contains__(self, *args, **kwargs): return self.locations.__contains__(*args, **kwargs) def __delitem__(self, *args, **kwargs): return self.locations.__delitem__(*args, **kwargs) def __delslice__(self, *args, **kwargs): return self.locations.__delslice__(*args, **kwargs) def __eq__(self, other): return self.locations == other def __getitem__(self, *args, **kwargs): return self.locations.__getitem__(*args, **kwargs) def __iadd__(self, other): if not hasattr(other, '__iter__'): raise TypeError() self._check_user_storage_quota(other) return self.locations.__iadd__(other) def __iter__(self, *args, **kwargs): return self.locations.__iter__(*args, **kwargs) def __len__(self, *args, **kwargs): return self.locations.__len__(*args, **kwargs) def __setitem__(self, key, value): 
return self.locations.__setitem__(key, value) def count(self, *args, **kwargs): return self.locations.count(*args, **kwargs) def index(self, *args, **kwargs): return self.locations.index(*args, **kwargs) def pop(self, *args, **kwargs): return self.locations.pop(*args, **kwargs) def remove(self, *args, **kwargs): return self.locations.remove(*args, **kwargs) def reverse(self, *args, **kwargs): return self.locations.reverse(*args, **kwargs) def _check_user_storage_quota(self, locations): required_size = _calc_required_size(self.context, self.image, locations) glance.api.common.check_quota(self.context, required_size, self.db_api) _enforce_image_location_quota(self.image, locations) def __copy__(self): return type(self)(self.image, self.context, self.db_api) def __deepcopy__(self, memo): # NOTE(zhiyan): Only copy location entries, others can be reused. self.image.locations = copy.deepcopy(self.locations, memo) return type(self)(self.image, self.context, self.db_api) def append(self, object): self._check_user_storage_quota([object]) return self.locations.append(object) def insert(self, index, object): self._check_user_storage_quota([object]) return self.locations.insert(index, object) def extend(self, iter): self._check_user_storage_quota(iter) return self.locations.extend(iter) class ImageProxy(glance.domain.proxy.Image): def __init__(self, image, context, db_api, store_utils): self.image = image self.context = context self.db_api = db_api self.store_utils = store_utils super(ImageProxy, self).__init__(image) self.orig_props = set(image.extra_properties.keys()) def set_data(self, data, size=None): remaining = glance.api.common.check_quota( self.context, size, self.db_api, image_id=self.image.image_id) if remaining is not None: # NOTE(jbresnah) we are trying to enforce a quota, put a limit # reader on the data data = utils.LimitingReader(data, remaining) try: self.image.set_data(data, size=size) except exception.ImageSizeLimitExceeded: raise 
exception.StorageQuotaFull(image_size=size, remaining=remaining) # NOTE(jbresnah) If two uploads happen at the same time and neither # properly sets the size attribute[1] then there is a race condition # that will allow for the quota to be broken[2]. Thus we must recheck # the quota after the upload and thus after we know the size. # # Also, when an upload doesn't set the size properly then the call to # check_quota above returns None and so utils.LimitingReader is not # used above. Hence the store (e.g. filesystem store) may have to # download the entire file before knowing the actual file size. Here # also we need to check for the quota again after the image has been # downloaded to the store. # # [1] For e.g. when using chunked transfers the 'Content-Length' # header is not set. # [2] For e.g.: # - Upload 1 does not exceed quota but upload 2 exceeds quota. # Both uploads are to different locations # - Upload 2 completes before upload 1 and writes image.size. # - Immediately, upload 1 completes and (over)writes image.size # with the smaller size. # - Now, to glance, image has not exceeded quota but, in # reality, the quota has been exceeded. 
try: glance.api.common.check_quota( self.context, self.image.size, self.db_api, image_id=self.image.image_id) except exception.StorageQuotaFull: with excutils.save_and_reraise_exception(): LOG.info(_LI('Cleaning up %s after exceeding the quota.'), self.image.image_id) self.store_utils.safe_delete_from_backend( self.context, self.image.image_id, self.image.locations[0]) @property def tags(self): return QuotaImageTagsProxy(self.image.tags) @tags.setter def tags(self, value): _enforce_image_tag_quota(value) self.image.tags = value @property def locations(self): return QuotaImageLocationsProxy(self.image, self.context, self.db_api) @locations.setter def locations(self, value): _enforce_image_location_quota(self.image, value, is_setter=True) if not isinstance(value, (list, QuotaImageLocationsProxy)): raise exception.Invalid(_('Invalid locations: %s') % value) required_size = _calc_required_size(self.context, self.image, value) glance.api.common.check_quota( self.context, required_size, self.db_api, image_id=self.image.image_id) self.image.locations = value def added_new_properties(self): current_props = set(self.image.extra_properties.keys()) return bool(current_props.difference(self.orig_props)) class ImageMemberProxy(glance.domain.proxy.ImageMember): def __init__(self, image_member, context, db_api, store_utils): self.image_member = image_member self.context = context self.db_api = db_api self.store_utils = store_utils super(ImageMemberProxy, self).__init__(image_member) glance-12.0.0/glance/domain/0000775000567000056710000000000012701407204016661 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/domain/__init__.py0000664000567000056710000005412712701407047021010 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import six from glance.common import exception from glance.common import timeutils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('task_executor', 'glance.common.config', group='task') _delayed_delete_imported = False def _import_delayed_delete(): # glance_store (indirectly) imports glance.domain therefore we can't put # the CONF.import_opt outside - we have to do it in a convoluted/indirect # way! 
global _delayed_delete_imported if not _delayed_delete_imported: CONF.import_opt('delayed_delete', 'glance_store') _delayed_delete_imported = True class ImageFactory(object): _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', 'size', 'virtual_size'] _reserved_properties = ['owner', 'locations', 'deleted', 'deleted_at', 'direct_url', 'self', 'file', 'schema'] def _check_readonly(self, kwargs): for key in self._readonly_properties: if key in kwargs: raise exception.ReadonlyProperty(property=key) def _check_unexpected(self, kwargs): if kwargs: msg = _('new_image() got unexpected keywords %s') raise TypeError(msg % kwargs.keys()) def _check_reserved(self, properties): if properties is not None: for key in self._reserved_properties: if key in properties: raise exception.ReservedProperty(property=key) def new_image(self, image_id=None, name=None, visibility='private', min_disk=0, min_ram=0, protected=False, owner=None, disk_format=None, container_format=None, extra_properties=None, tags=None, **other_args): extra_properties = extra_properties or {} self._check_readonly(other_args) self._check_unexpected(other_args) self._check_reserved(extra_properties) if image_id is None: image_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at status = 'queued' return Image(image_id=image_id, name=name, status=status, created_at=created_at, updated_at=updated_at, visibility=visibility, min_disk=min_disk, min_ram=min_ram, protected=protected, owner=owner, disk_format=disk_format, container_format=container_format, extra_properties=extra_properties, tags=tags or []) class Image(object): valid_state_targets = { # Each key denotes a "current" state for the image. Corresponding # values list the valid states to which we can jump from that "current" # state. # NOTE(flwang): In v2, we are deprecating the 'killed' status, so it's # allowed to restore image from 'saving' to 'queued' so that upload # can be retried. 
'queued': ('saving', 'active', 'deleted'), 'saving': ('active', 'killed', 'deleted', 'queued'), 'active': ('pending_delete', 'deleted', 'deactivated'), 'killed': ('deleted',), 'pending_delete': ('deleted',), 'deleted': (), 'deactivated': ('active', 'deleted'), } def __init__(self, image_id, status, created_at, updated_at, **kwargs): self.image_id = image_id self.status = status self.created_at = created_at self.updated_at = updated_at self.name = kwargs.pop('name', None) self.visibility = kwargs.pop('visibility', 'private') self.min_disk = kwargs.pop('min_disk', 0) self.min_ram = kwargs.pop('min_ram', 0) self.protected = kwargs.pop('protected', False) self.locations = kwargs.pop('locations', []) self.checksum = kwargs.pop('checksum', None) self.owner = kwargs.pop('owner', None) self._disk_format = kwargs.pop('disk_format', None) self._container_format = kwargs.pop('container_format', None) self.size = kwargs.pop('size', None) self.virtual_size = kwargs.pop('virtual_size', None) extra_properties = kwargs.pop('extra_properties', {}) self.extra_properties = ExtraProperties(extra_properties) self.tags = kwargs.pop('tags', []) if kwargs: message = _("__init__() got unexpected keyword argument '%s'") raise TypeError(message % list(kwargs.keys())[0]) @property def status(self): return self._status @status.setter def status(self, status): has_status = hasattr(self, '_status') if has_status: if status not in self.valid_state_targets[self._status]: kw = {'cur_status': self._status, 'new_status': status} e = exception.InvalidImageStatusTransition(**kw) LOG.debug(e) raise e if self._status == 'queued' and status in ('saving', 'active'): missing = [k for k in ['disk_format', 'container_format'] if not getattr(self, k)] if len(missing) > 0: if len(missing) == 1: msg = _('Property %s must be set prior to ' 'saving data.') else: msg = _('Properties %s must be set prior to ' 'saving data.') raise ValueError(msg % ', '.join(missing)) # NOTE(flwang): Image size should be cleared as 
long as the image # status is updated to 'queued' if status == 'queued': self.size = None self.virtual_size = None self._status = status @property def visibility(self): return self._visibility @visibility.setter def visibility(self, visibility): if visibility not in ('public', 'private'): raise ValueError(_('Visibility must be either "public" ' 'or "private"')) self._visibility = visibility @property def tags(self): return self._tags @tags.setter def tags(self, value): self._tags = set(value) @property def container_format(self): return self._container_format @container_format.setter def container_format(self, value): if hasattr(self, '_container_format') and self.status != 'queued': msg = _("Attribute container_format can be only replaced " "for a queued image.") raise exception.Forbidden(message=msg) self._container_format = value @property def disk_format(self): return self._disk_format @disk_format.setter def disk_format(self, value): if hasattr(self, '_disk_format') and self.status != 'queued': msg = _("Attribute disk_format can be only replaced " "for a queued image.") raise exception.Forbidden(message=msg) self._disk_format = value @property def min_disk(self): return self._min_disk @min_disk.setter def min_disk(self, value): if value and value < 0: extra_msg = _('Cannot be a negative value') raise exception.InvalidParameterValue(value=value, param='min_disk', extra_msg=extra_msg) self._min_disk = value @property def min_ram(self): return self._min_ram @min_ram.setter def min_ram(self, value): if value and value < 0: extra_msg = _('Cannot be a negative value') raise exception.InvalidParameterValue(value=value, param='min_ram', extra_msg=extra_msg) self._min_ram = value def delete(self): if self.protected: raise exception.ProtectedImageDelete(image_id=self.image_id) if CONF.delayed_delete and self.locations: self.status = 'pending_delete' else: self.status = 'deleted' def deactivate(self): if self.status == 'active': self.status = 'deactivated' elif 
self.status == 'deactivated': # Noop if already deactive pass else: LOG.debug("Not allowed to deactivate image in status '%s'", self.status) msg = (_("Not allowed to deactivate image in status '%s'") % self.status) raise exception.Forbidden(message=msg) def reactivate(self): if self.status == 'deactivated': self.status = 'active' elif self.status == 'active': # Noop if already active pass else: LOG.debug("Not allowed to reactivate image in status '%s'", self.status) msg = (_("Not allowed to reactivate image in status '%s'") % self.status) raise exception.Forbidden(message=msg) def get_data(self, *args, **kwargs): raise NotImplementedError() def set_data(self, data, size=None): raise NotImplementedError() class ExtraProperties(collections.MutableMapping, dict): def __getitem__(self, key): return dict.__getitem__(self, key) def __setitem__(self, key, value): return dict.__setitem__(self, key, value) def __delitem__(self, key): return dict.__delitem__(self, key) def __eq__(self, other): if isinstance(other, ExtraProperties): return dict(self).__eq__(dict(other)) elif isinstance(other, dict): return dict(self).__eq__(other) else: return False def __len__(self): return dict(self).__len__() def keys(self): return dict(self).keys() class ImageMembership(object): def __init__(self, image_id, member_id, created_at, updated_at, id=None, status=None): self.id = id self.image_id = image_id self.member_id = member_id self.created_at = created_at self.updated_at = updated_at self.status = status @property def status(self): return self._status @status.setter def status(self, status): if status not in ('pending', 'accepted', 'rejected'): msg = _('Status must be "pending", "accepted" or "rejected".') raise ValueError(msg) self._status = status class ImageMemberFactory(object): def new_image_member(self, image, member_id): created_at = timeutils.utcnow() updated_at = created_at return ImageMembership(image_id=image.image_id, member_id=member_id, created_at=created_at, 
updated_at=updated_at, status='pending') class Task(object): _supported_task_type = ('import',) _supported_task_status = ('pending', 'processing', 'success', 'failure') def __init__(self, task_id, task_type, status, owner, expires_at, created_at, updated_at, task_input, result, message): if task_type not in self._supported_task_type: raise exception.InvalidTaskType(task_type) if status not in self._supported_task_status: raise exception.InvalidTaskStatus(status) self.task_id = task_id self._status = status self.type = task_type self.owner = owner self.expires_at = expires_at # NOTE(nikhil): We use '_time_to_live' to determine how long a # task should live from the time it succeeds or fails. task_time_to_live = CONF.task.task_time_to_live self._time_to_live = datetime.timedelta(hours=task_time_to_live) self.created_at = created_at self.updated_at = updated_at self.task_input = task_input self.result = result self.message = message @property def status(self): return self._status @property def message(self): return self._message @message.setter def message(self, message): if message: self._message = six.text_type(message) else: self._message = six.text_type('') def _validate_task_status_transition(self, cur_status, new_status): valid_transitions = { 'pending': ['processing', 'failure'], 'processing': ['success', 'failure'], 'success': [], 'failure': [], } if new_status in valid_transitions[cur_status]: return True else: return False def _set_task_status(self, new_status): if self._validate_task_status_transition(self.status, new_status): self._status = new_status LOG.info(_LI("Task [%(task_id)s] status changing from " "%(cur_status)s to %(new_status)s"), {'task_id': self.task_id, 'cur_status': self.status, 'new_status': new_status}) self._status = new_status else: LOG.error(_LE("Task [%(task_id)s] status failed to change from " "%(cur_status)s to %(new_status)s"), {'task_id': self.task_id, 'cur_status': self.status, 'new_status': new_status}) raise 
exception.InvalidTaskStatusTransition( cur_status=self.status, new_status=new_status ) def begin_processing(self): new_status = 'processing' self._set_task_status(new_status) def succeed(self, result): new_status = 'success' self.result = result self._set_task_status(new_status) self.expires_at = timeutils.utcnow() + self._time_to_live def fail(self, message): new_status = 'failure' self.message = message self._set_task_status(new_status) self.expires_at = timeutils.utcnow() + self._time_to_live def run(self, executor): executor.begin_processing(self.task_id) class TaskStub(object): def __init__(self, task_id, task_type, status, owner, expires_at, created_at, updated_at): self.task_id = task_id self._status = status self.type = task_type self.owner = owner self.expires_at = expires_at self.created_at = created_at self.updated_at = updated_at @property def status(self): return self._status class TaskFactory(object): def new_task(self, task_type, owner, task_input=None, **kwargs): task_id = str(uuid.uuid4()) status = 'pending' # Note(nikhil): expires_at would be set on the task, only when it # succeeds or fails. expires_at = None created_at = timeutils.utcnow() updated_at = created_at return Task( task_id, task_type, status, owner, expires_at, created_at, updated_at, task_input, kwargs.get('result'), kwargs.get('message') ) class TaskExecutorFactory(object): eventlet_deprecation_warned = False def __init__(self, task_repo, image_repo, image_factory): self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory def new_task_executor(self, context): try: # NOTE(flaper87): Backwards compatibility layer. # It'll allow us to provide a deprecation path to # users that are currently consuming the `eventlet` # executor. task_executor = CONF.task.task_executor if task_executor == 'eventlet': # NOTE(jokke): Making sure we do not log the deprecation # warning 1000 times or anything crazy like that. 
if not TaskExecutorFactory.eventlet_deprecation_warned: msg = _LW("The `eventlet` executor has been deprecated. " "Use `taskflow` instead.") LOG.warn(msg) TaskExecutorFactory.eventlet_deprecation_warned = True task_executor = 'taskflow' executor_cls = ('glance.async.%s_executor.' 'TaskExecutor' % task_executor) LOG.debug("Loading %s executor", task_executor) executor = importutils.import_class(executor_cls) return executor(context, self.task_repo, self.image_repo, self.image_factory) except ImportError: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to load the %s executor provided " "in the config.") % CONF.task.task_executor) class MetadefNamespace(object): def __init__(self, namespace_id, namespace, display_name, description, owner, visibility, protected, created_at, updated_at): self.namespace_id = namespace_id self.namespace = namespace self.display_name = display_name self.description = description self.owner = owner self.visibility = visibility or "private" self.protected = protected or False self.created_at = created_at self.updated_at = updated_at def delete(self): if self.protected: raise exception.ProtectedMetadefNamespaceDelete( namespace=self.namespace) class MetadefNamespaceFactory(object): def new_namespace(self, namespace, owner, **kwargs): namespace_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefNamespace( namespace_id, namespace, kwargs.get('display_name'), kwargs.get('description'), owner, kwargs.get('visibility'), kwargs.get('protected'), created_at, updated_at ) class MetadefObject(object): def __init__(self, namespace, object_id, name, created_at, updated_at, required, description, properties): self.namespace = namespace self.object_id = object_id self.name = name self.created_at = created_at self.updated_at = updated_at self.required = required self.description = description self.properties = properties def delete(self): if self.namespace.protected: raise 
exception.ProtectedMetadefObjectDelete(object_name=self.name) class MetadefObjectFactory(object): def new_object(self, namespace, name, **kwargs): object_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefObject( namespace, object_id, name, created_at, updated_at, kwargs.get('required'), kwargs.get('description'), kwargs.get('properties') ) class MetadefResourceType(object): def __init__(self, namespace, name, prefix, properties_target, created_at, updated_at): self.namespace = namespace self.name = name self.prefix = prefix self.properties_target = properties_target self.created_at = created_at self.updated_at = updated_at def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefResourceTypeAssociationDelete( resource_type=self.name) class MetadefResourceTypeFactory(object): def new_resource_type(self, namespace, name, **kwargs): created_at = timeutils.utcnow() updated_at = created_at return MetadefResourceType( namespace, name, kwargs.get('prefix'), kwargs.get('properties_target'), created_at, updated_at ) class MetadefProperty(object): def __init__(self, namespace, property_id, name, schema): self.namespace = namespace self.property_id = property_id self.name = name self.schema = schema def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefNamespacePropDelete( property_name=self.name) class MetadefPropertyFactory(object): def new_namespace_property(self, namespace, name, schema, **kwargs): property_id = str(uuid.uuid4()) return MetadefProperty( namespace, property_id, name, schema ) class MetadefTag(object): def __init__(self, namespace, tag_id, name, created_at, updated_at): self.namespace = namespace self.tag_id = tag_id self.name = name self.created_at = created_at self.updated_at = updated_at def delete(self): if self.namespace.protected: raise exception.ProtectedMetadefTagDelete(tag_name=self.name) class MetadefTagFactory(object): def new_tag(self, namespace, name, 
**kwargs): tag_id = str(uuid.uuid4()) created_at = timeutils.utcnow() updated_at = created_at return MetadefTag( namespace, tag_id, name, created_at, updated_at ) glance-12.0.0/glance/domain/proxy.py0000664000567000056710000004573112701407047020433 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def _proxy(target, attr): def get_attr(self): return getattr(getattr(self, target), attr) def set_attr(self, value): return setattr(getattr(self, target), attr, value) def del_attr(self): return delattr(getattr(self, target), attr) return property(get_attr, set_attr, del_attr) class Helper(object): def __init__(self, proxy_class=None, proxy_kwargs=None): self.proxy_class = proxy_class self.proxy_kwargs = proxy_kwargs or {} def proxy(self, obj): if obj is None or self.proxy_class is None: return obj return self.proxy_class(obj, **self.proxy_kwargs) def unproxy(self, obj): if obj is None or self.proxy_class is None: return obj return obj.base class TaskRepo(object): def __init__(self, base, task_proxy_class=None, task_proxy_kwargs=None): self.base = base self.task_proxy_helper = Helper(task_proxy_class, task_proxy_kwargs) def get(self, task_id): task = self.base.get(task_id) return self.task_proxy_helper.proxy(task) def add(self, task): self.base.add(self.task_proxy_helper.unproxy(task)) def save(self, task): self.base.save(self.task_proxy_helper.unproxy(task)) def 
remove(self, task): base_task = self.task_proxy_helper.unproxy(task) self.base.remove(base_task) class TaskStubRepo(object): def __init__(self, base, task_stub_proxy_class=None, task_stub_proxy_kwargs=None): self.base = base self.task_stub_proxy_helper = Helper(task_stub_proxy_class, task_stub_proxy_kwargs) def list(self, *args, **kwargs): tasks = self.base.list(*args, **kwargs) return [self.task_stub_proxy_helper.proxy(task) for task in tasks] class Repo(object): def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): self.base = base self.helper = Helper(item_proxy_class, item_proxy_kwargs) def get(self, item_id): return self.helper.proxy(self.base.get(item_id)) def list(self, *args, **kwargs): items = self.base.list(*args, **kwargs) return [self.helper.proxy(item) for item in items] def add(self, item): base_item = self.helper.unproxy(item) result = self.base.add(base_item) return self.helper.proxy(result) def save(self, item, from_state=None): base_item = self.helper.unproxy(item) result = self.base.save(base_item, from_state=from_state) return self.helper.proxy(result) def remove(self, item): base_item = self.helper.unproxy(item) result = self.base.remove(base_item) return self.helper.proxy(result) class MemberRepo(object): def __init__(self, image, base, member_proxy_class=None, member_proxy_kwargs=None): self.image = image self.base = base self.member_proxy_helper = Helper(member_proxy_class, member_proxy_kwargs) def get(self, member_id): member = self.base.get(member_id) return self.member_proxy_helper.proxy(member) def add(self, member): self.base.add(self.member_proxy_helper.unproxy(member)) def list(self, *args, **kwargs): members = self.base.list(*args, **kwargs) return [self.member_proxy_helper.proxy(member) for member in members] def remove(self, member): base_item = self.member_proxy_helper.unproxy(member) result = self.base.remove(base_item) return self.member_proxy_helper.proxy(result) def save(self, member, from_state=None): 
base_item = self.member_proxy_helper.unproxy(member) result = self.base.save(base_item, from_state=from_state) return self.member_proxy_helper.proxy(result) class ImageFactory(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base def new_image(self, **kwargs): return self.helper.proxy(self.base.new_image(**kwargs)) class ImageMembershipFactory(object): def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base def new_image_member(self, image, member, **kwargs): return self.helper.proxy(self.base.new_image_member(image, member, **kwargs)) class Image(object): def __init__(self, base, member_repo_proxy_class=None, member_repo_proxy_kwargs=None): self.base = base self.helper = Helper(member_repo_proxy_class, member_repo_proxy_kwargs) name = _proxy('base', 'name') image_id = _proxy('base', 'image_id') status = _proxy('base', 'status') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') visibility = _proxy('base', 'visibility') min_disk = _proxy('base', 'min_disk') min_ram = _proxy('base', 'min_ram') protected = _proxy('base', 'protected') locations = _proxy('base', 'locations') checksum = _proxy('base', 'checksum') owner = _proxy('base', 'owner') disk_format = _proxy('base', 'disk_format') container_format = _proxy('base', 'container_format') size = _proxy('base', 'size') virtual_size = _proxy('base', 'virtual_size') extra_properties = _proxy('base', 'extra_properties') tags = _proxy('base', 'tags') def delete(self): self.base.delete() def deactivate(self): self.base.deactivate() def reactivate(self): self.base.reactivate() def set_data(self, data, size=None): self.base.set_data(data, size) def get_data(self, *args, **kwargs): return self.base.get_data(*args, **kwargs) class ImageMember(object): def __init__(self, base): self.base = base id = _proxy('base', 'id') image_id = 
_proxy('base', 'image_id') member_id = _proxy('base', 'member_id') status = _proxy('base', 'status') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') class Task(object): def __init__(self, base): self.base = base task_id = _proxy('base', 'task_id') type = _proxy('base', 'type') status = _proxy('base', 'status') owner = _proxy('base', 'owner') expires_at = _proxy('base', 'expires_at') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') task_input = _proxy('base', 'task_input') result = _proxy('base', 'result') message = _proxy('base', 'message') def begin_processing(self): self.base.begin_processing() def succeed(self, result): self.base.succeed(result) def fail(self, message): self.base.fail(message) def run(self, executor): self.base.run(executor) class TaskStub(object): def __init__(self, base): self.base = base task_id = _proxy('base', 'task_id') type = _proxy('base', 'type') status = _proxy('base', 'status') owner = _proxy('base', 'owner') expires_at = _proxy('base', 'expires_at') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') class TaskFactory(object): def __init__(self, base, task_proxy_class=None, task_proxy_kwargs=None): self.task_helper = Helper(task_proxy_class, task_proxy_kwargs) self.base = base def new_task(self, **kwargs): t = self.base.new_task(**kwargs) return self.task_helper.proxy(t) # Metadef Namespace classes class MetadefNamespaceRepo(object): def __init__(self, base, namespace_proxy_class=None, namespace_proxy_kwargs=None): self.base = base self.namespace_proxy_helper = Helper(namespace_proxy_class, namespace_proxy_kwargs) def get(self, namespace): namespace_obj = self.base.get(namespace) return self.namespace_proxy_helper.proxy(namespace_obj) def add(self, namespace): self.base.add(self.namespace_proxy_helper.unproxy(namespace)) def list(self, *args, **kwargs): namespaces = self.base.list(*args, **kwargs) return 
[self.namespace_proxy_helper.proxy(namespace) for namespace in namespaces] def remove(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.namespace_proxy_helper.proxy(result) def remove_objects(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_objects(base_item) return self.namespace_proxy_helper.proxy(result) def remove_properties(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_properties(base_item) return self.namespace_proxy_helper.proxy(result) def remove_tags(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.remove_tags(base_item) return self.namespace_proxy_helper.proxy(result) def save(self, item): base_item = self.namespace_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.namespace_proxy_helper.proxy(result) class MetadefNamespace(object): def __init__(self, base): self.base = base namespace_id = _proxy('base', 'namespace_id') namespace = _proxy('base', 'namespace') display_name = _proxy('base', 'display_name') description = _proxy('base', 'description') owner = _proxy('base', 'owner') visibility = _proxy('base', 'visibility') protected = _proxy('base', 'protected') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefNamespaceFactory(object): def __init__(self, base, meta_namespace_proxy_class=None, meta_namespace_proxy_kwargs=None): self.meta_namespace_helper = Helper(meta_namespace_proxy_class, meta_namespace_proxy_kwargs) self.base = base def new_namespace(self, **kwargs): t = self.base.new_namespace(**kwargs) return self.meta_namespace_helper.proxy(t) # Metadef object classes class MetadefObjectRepo(object): def __init__(self, base, object_proxy_class=None, object_proxy_kwargs=None): self.base = base self.object_proxy_helper = Helper(object_proxy_class, 
object_proxy_kwargs) def get(self, namespace, object_name): meta_object = self.base.get(namespace, object_name) return self.object_proxy_helper.proxy(meta_object) def add(self, meta_object): self.base.add(self.object_proxy_helper.unproxy(meta_object)) def list(self, *args, **kwargs): objects = self.base.list(*args, **kwargs) return [self.object_proxy_helper.proxy(meta_object) for meta_object in objects] def remove(self, item): base_item = self.object_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.object_proxy_helper.proxy(result) def save(self, item): base_item = self.object_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.object_proxy_helper.proxy(result) class MetadefObject(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') object_id = _proxy('base', 'object_id') name = _proxy('base', 'name') required = _proxy('base', 'required') description = _proxy('base', 'description') properties = _proxy('base', 'properties') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefObjectFactory(object): def __init__(self, base, meta_object_proxy_class=None, meta_object_proxy_kwargs=None): self.meta_object_helper = Helper(meta_object_proxy_class, meta_object_proxy_kwargs) self.base = base def new_object(self, **kwargs): t = self.base.new_object(**kwargs) return self.meta_object_helper.proxy(t) # Metadef ResourceType classes class MetadefResourceTypeRepo(object): def __init__(self, base, resource_type_proxy_class=None, resource_type_proxy_kwargs=None): self.base = base self.resource_type_proxy_helper = Helper(resource_type_proxy_class, resource_type_proxy_kwargs) def add(self, meta_resource_type): self.base.add(self.resource_type_proxy_helper.unproxy( meta_resource_type)) def get(self, *args, **kwargs): resource_type = self.base.get(*args, **kwargs) return 
self.resource_type_proxy_helper.proxy(resource_type) def list(self, *args, **kwargs): resource_types = self.base.list(*args, **kwargs) return [self.resource_type_proxy_helper.proxy(resource_type) for resource_type in resource_types] def remove(self, item): base_item = self.resource_type_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.resource_type_proxy_helper.proxy(result) class MetadefResourceType(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') name = _proxy('base', 'name') prefix = _proxy('base', 'prefix') properties_target = _proxy('base', 'properties_target') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class MetadefResourceTypeFactory(object): def __init__(self, base, resource_type_proxy_class=None, resource_type_proxy_kwargs=None): self.resource_type_helper = Helper(resource_type_proxy_class, resource_type_proxy_kwargs) self.base = base def new_resource_type(self, **kwargs): t = self.base.new_resource_type(**kwargs) return self.resource_type_helper.proxy(t) # Metadef namespace property classes class MetadefPropertyRepo(object): def __init__(self, base, property_proxy_class=None, property_proxy_kwargs=None): self.base = base self.property_proxy_helper = Helper(property_proxy_class, property_proxy_kwargs) def get(self, namespace, property_name): property = self.base.get(namespace, property_name) return self.property_proxy_helper.proxy(property) def add(self, property): self.base.add(self.property_proxy_helper.unproxy(property)) def list(self, *args, **kwargs): properties = self.base.list(*args, **kwargs) return [self.property_proxy_helper.proxy(property) for property in properties] def remove(self, item): base_item = self.property_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.property_proxy_helper.proxy(result) def save(self, item): base_item = self.property_proxy_helper.unproxy(item) 
result = self.base.save(base_item) return self.property_proxy_helper.proxy(result) class MetadefProperty(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') property_id = _proxy('base', 'property_id') name = _proxy('base', 'name') schema = _proxy('base', 'schema') def delete(self): self.base.delete() class MetadefPropertyFactory(object): def __init__(self, base, property_proxy_class=None, property_proxy_kwargs=None): self.meta_object_helper = Helper(property_proxy_class, property_proxy_kwargs) self.base = base def new_namespace_property(self, **kwargs): t = self.base.new_namespace_property(**kwargs) return self.meta_object_helper.proxy(t) # Metadef tag classes class MetadefTagRepo(object): def __init__(self, base, tag_proxy_class=None, tag_proxy_kwargs=None): self.base = base self.tag_proxy_helper = Helper(tag_proxy_class, tag_proxy_kwargs) def get(self, namespace, name): meta_tag = self.base.get(namespace, name) return self.tag_proxy_helper.proxy(meta_tag) def add(self, meta_tag): self.base.add(self.tag_proxy_helper.unproxy(meta_tag)) def add_tags(self, meta_tags): tags_list = [] for meta_tag in meta_tags: tags_list.append(self.tag_proxy_helper.unproxy(meta_tag)) self.base.add_tags(tags_list) def list(self, *args, **kwargs): tags = self.base.list(*args, **kwargs) return [self.tag_proxy_helper.proxy(meta_tag) for meta_tag in tags] def remove(self, item): base_item = self.tag_proxy_helper.unproxy(item) result = self.base.remove(base_item) return self.tag_proxy_helper.proxy(result) def save(self, item): base_item = self.tag_proxy_helper.unproxy(item) result = self.base.save(base_item) return self.tag_proxy_helper.proxy(result) class MetadefTag(object): def __init__(self, base): self.base = base namespace = _proxy('base', 'namespace') tag_id = _proxy('base', 'tag_id') name = _proxy('base', 'name') created_at = _proxy('base', 'created_at') updated_at = _proxy('base', 'updated_at') def delete(self): self.base.delete() class 
MetadefTagFactory(object): def __init__(self, base, meta_tag_proxy_class=None, meta_tag_proxy_kwargs=None): self.meta_tag_helper = Helper(meta_tag_proxy_class, meta_tag_proxy_kwargs) self.base = base def new_tag(self, **kwargs): t = self.base.new_tag(**kwargs) return self.meta_tag_helper.proxy(t) glance-12.0.0/glance/tests/0000775000567000056710000000000012701407204016554 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/etc/0000775000567000056710000000000012701407204017327 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/etc/property-protections-policies.conf0000664000567000056710000000163312701407047026246 0ustar jenkinsjenkins00000000000000[spl_creator_policy] create = glance_creator read = glance_creator update = context_is_admin delete = context_is_admin [spl_default_policy] create = context_is_admin read = default update = context_is_admin delete = context_is_admin [^x_all_permitted.*] create = @ read = @ update = @ delete = @ [^x_none_permitted.*] create = ! read = ! update = ! delete = ! [x_none_read] create = context_is_admin read = ! update = ! delete = ! [x_none_update] create = context_is_admin read = context_is_admin update = ! delete = context_is_admin [x_none_delete] create = context_is_admin read = context_is_admin update = context_is_admin delete = ! 
[x_foo_matcher] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin [x_foo_*] create = @ read = @ update = @ delete = @ [.*] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin glance-12.0.0/glance/tests/etc/policy.json0000664000567000056710000000254712701407047021536 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "default": "", "glance_creator": "role:admin or role:spl_role", "add_image": "", "delete_image": "", "get_image": "", "get_images": "", "modify_image": "", "publicize_image": "", "copy_from": "", "download_image": "", "upload_image": "", "delete_image_location": "", "get_image_location": "", "set_image_location": "", "add_member": "", "delete_member": "", "get_member": "", "get_members": "", "modify_member": "", "manage_image_cache": "", "get_task": "role:admin", "get_tasks": "role:admin", "add_task": "role:admin", "modify_task": "role:admin", "get_metadef_namespace": "", "get_metadef_namespaces":"", "modify_metadef_namespace":"", "add_metadef_namespace":"", "get_metadef_object":"", "get_metadef_objects":"", "modify_metadef_object":"", "add_metadef_object":"", "list_metadef_resource_types":"", "get_metadef_resource_type":"", "add_metadef_resource_type_association":"", "get_metadef_property":"", "get_metadef_properties":"", "modify_metadef_property":"", "add_metadef_property":"", "get_metadef_tag":"", "get_metadef_tags":"", "modify_metadef_tag":"", "add_metadef_tag":"", "add_metadef_tags":"", "deactivate": "", "reactivate": "" } glance-12.0.0/glance/tests/etc/property-protections.conf0000664000567000056710000000267312701407047024446 0ustar jenkinsjenkins00000000000000[^x_owner_.*] create = admin,member read = admin,member update = admin,member delete = admin,member [spl_create_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin [spl_read_prop] create = admin,spl_role read = admin,spl_role update = admin 
delete = admin [spl_read_only_prop] create = admin read = admin,spl_role update = admin delete = admin [spl_update_prop] create = admin,spl_role read = admin,spl_role update = admin,spl_role delete = admin [spl_update_only_prop] create = admin read = admin update = admin,spl_role delete = admin [spl_delete_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin,spl_role [spl_delete_empty_prop] create = admin,spl_role read = admin,spl_role update = admin delete = admin,spl_role [^x_all_permitted.*] create = @ read = @ update = @ delete = @ [^x_none_permitted.*] create = ! read = ! update = ! delete = ! [x_none_read] create = admin,member read = ! update = ! delete = ! [x_none_update] create = admin,member read = admin,member update = ! delete = admin,member [x_none_delete] create = admin,member read = admin,member update = admin,member delete = ! [x_case_insensitive] create = admin,Member read = admin,Member update = admin,Member delete = admin,Member [x_foo_matcher] create = admin read = admin update = admin delete = admin [x_foo_*] create = @ read = @ update = @ delete = @ [.*] create = admin read = admin update = admin delete = admin glance-12.0.0/glance/tests/etc/glance-swift.conf0000664000567000056710000000100412701407047022561 0ustar jenkinsjenkins00000000000000[ref1] user = tenant:user1 key = key1 auth_address = example.com [ref2] user = user2 key = key2 auth_address = http://example.com [store_2] user = tenant:user1 key = key1 auth_address= https://localhost:8080 [store_3] user= tenant:user2 key= key2 auth_address= https://localhost:8080 [store_4] user = tenant:user1 key = key1 auth_address = http://localhost:80 [store_5] user = tenant:user1 key = key1 auth_address = http://localhost [store_6] user = tenant:user1 key = key1 auth_address = https://localhost/v1 glance-12.0.0/glance/tests/etc/schema-image.json0000664000567000056710000000000312701407047022540 0ustar jenkinsjenkins00000000000000{} 
glance-12.0.0/glance/tests/utils.py0000664000567000056710000005201412701407047020275 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import errno import functools import os import shlex import shutil import socket import subprocess import fixtures from oslo_config import cfg from oslo_config import fixture as cfg_fixture from oslo_log import log from oslo_serialization import jsonutils from oslotest import moxstubout import six from six.moves import BaseHTTPServer import testtools import webob from glance.common import config from glance.common import exception from glance.common import property_utils from glance.common import timeutils from glance.common import utils from glance.common import wsgi from glance import context from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import models as db_models CONF = cfg.CONF try: CONF.debug except cfg.NoSuchOptError: # NOTE(sigmavirus24): If we run the entire test suite, the logging options # will be registered appropriately and we do not need to re-register them. # However, when we run a test in isolation (or use --debug), those options # will not be registered for us. In order for a test in a class that # inherits from BaseTestCase to even run, we will need to register them # ourselves. 
BaseTestCase.config will set the debug level if something # calls self.config(debug=True) so we need these options registered # appropriately. # See bug 1433785 for more details. log.register_options(CONF) class BaseTestCase(testtools.TestCase): def setUp(self): super(BaseTestCase, self).setUp() self._config_fixture = self.useFixture(cfg_fixture.Config()) # NOTE(bcwaldon): parse_args has to be called to register certain # command-line options - specifically we need config_dir for # the following policy tests config.parse_args(args=[]) self.addCleanup(CONF.reset) mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.stubs = mox_fixture.stubs self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True) self.test_dir = self.useFixture(fixtures.TempDir()).path self.conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(self.conf_dir) self.set_policy() def set_policy(self): conf_file = "policy.json" self.policy_file = self._copy_data_file(conf_file, self.conf_dir) self.config(policy_file=self.policy_file, group='oslo_policy') def set_property_protections(self, use_policies=False): self.unset_property_protections() conf_file = "property-protections.conf" if use_policies: conf_file = "property-protections-policies.conf" self.config(property_protection_rule_format="policies") self.property_file = self._copy_data_file(conf_file, self.test_dir) self.config(property_protection_file=self.property_file) def unset_property_protections(self): for section in property_utils.CONFIG.sections(): property_utils.CONFIG.remove_section(section) def _copy_data_file(self, file_name, dst_dir): src_file_name = os.path.join('glance/tests/etc', file_name) shutil.copy(src_file_name, dst_dir) dst_file_name = os.path.join(dst_dir, file_name) return dst_file_name def set_property_protection_rules(self, rules): with open(self.property_file, 'w') as f: for rule_key in rules.keys(): f.write('[%s]\n' % rule_key) for operation in rules[rule_key].keys(): roles_str = 
','.join(rules[rule_key][operation]) f.write('%s = %s\n' % (operation, roles_str)) def config(self, **kw): """ Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. """ self._config_fixture.config(**kw) class requires(object): """Decorator that initiates additional test setup/teardown.""" def __init__(self, setup=None, teardown=None): self.setup = setup self.teardown = teardown def __call__(self, func): def _runner(*args, **kw): if self.setup: self.setup(args[0]) func(*args, **kw) if self.teardown: self.teardown(args[0]) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner class depends_on_exe(object): """Decorator to skip test if an executable is unavailable""" def __init__(self, exe): self.exe = exe def __call__(self, func): def _runner(*args, **kw): cmd = 'which %s' % self.exe exitcode, out, err = execute(cmd, raise_error=False) if exitcode != 0: args[0].disabled_message = 'test requires exe: %s' % self.exe args[0].disabled = True func(*args, **kw) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner def skip_if_disabled(func): """Decorator that skips a test if test case is disabled.""" @functools.wraps(func) def wrapped(*a, **kwargs): func.__test__ = False test_obj = a[0] message = getattr(test_obj, 'disabled_message', 'Test disabled') if getattr(test_obj, 'disabled', False): test_obj.skipTest(message) func(*a, **kwargs) return wrapped def fork_exec(cmd, exec_env=None, logfile=None, pass_fds=None): """ Execute a command using fork/exec. This is needed for programs system executions that need path searching but cannot have a shell as their parent process, for example: glance-api. 
When glance-api starts it sets itself as the parent process for its own process group. Thus the pid that a Popen process would have is not the right pid to use for killing the process group. This patch gives the test env direct access to the actual pid. :param cmd: Command to execute as an array of arguments. :param exec_env: A dictionary representing the environment with which to run the command. :param logile: A path to a file which will hold the stdout/err of the child process. :param pass_fds: Sequence of file descriptors passed to the child. """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val pid = os.fork() if pid == 0: if logfile: fds = [1, 2] with open(logfile, 'r+b') as fptr: for desc in fds: # close fds try: os.dup2(fptr.fileno(), desc) except OSError: pass if pass_fds and hasattr(os, 'set_inheritable'): # os.set_inheritable() is only available and needed # since Python 3.4. On Python 3.3 and older, file descriptors are # inheritable by default. for fd in pass_fds: os.set_inheritable(fd, True) args = shlex.split(cmd) os.execvpe(args[0], args, env) else: return pid def wait_for_fork(pid, raise_error=True, expected_exitcode=0): """ Wait for a process to complete This function will wait for the given pid to complete. If the exit code does not match that of the expected_exitcode an error is raised. """ rc = 0 try: (pid, rc) = os.waitpid(pid, 0) rc = os.WEXITSTATUS(rc) if rc != expected_exitcode: raise RuntimeError('The exit code %d is not %d' % (rc, expected_exitcode)) except Exception: if raise_error: raise return rc def execute(cmd, raise_error=True, no_venv=False, exec_env=None, expect_exit=True, expected_exitcode=0, context=None): """ Executes a command in a subprocess. 
Returns a tuple of (exitcode, out, err), where out is the string output from stdout and err is the string output from stderr when executing the command. :param cmd: Command string to execute :param raise_error: If returncode is not 0 (success), then raise a RuntimeError? Default: True) :param no_venv: Disable the virtual environment :param exec_env: Optional dictionary of additional environment variables; values may be callables, which will be passed the current value of the named environment variable :param expect_exit: Optional flag true iff timely exit is expected :param expected_exitcode: expected exitcode from the launcher :param context: additional context for error message """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val # If we're asked to omit the virtualenv, and if one is set up, # restore the various environment variables if no_venv and 'VIRTUAL_ENV' in env: # Clip off the first element of PATH env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1] del env['VIRTUAL_ENV'] # Make sure that we use the programs in the # current source directory's bin/ directory. path_ext = [os.path.join(os.getcwd(), 'bin')] # Also jack in the path cmd comes from, if it's absolute args = shlex.split(cmd) executable = args[0] if os.path.isabs(executable): path_ext.append(os.path.dirname(executable)) env['PATH'] = ':'.join(path_ext) + ':' + env['PATH'] process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) if expect_exit: result = process.communicate() (out, err) = result exitcode = process.returncode else: out = '' err = '' exitcode = 0 if exitcode != expected_exitcode and raise_error: msg = ("Command %(cmd)s did not succeed. Returned an exit " "code of %(exitcode)d." 
"\n\nSTDOUT: %(out)s" "\n\nSTDERR: %(err)s" % {'cmd': cmd, 'exitcode': exitcode, 'out': out, 'err': err}) if context: msg += "\n\nCONTEXT: %s" % context raise RuntimeError(msg) return exitcode, out, err def find_executable(cmdname): """ Searches the path for a given cmdname. Returns an absolute filename if an executable with the given name exists in the path, or None if one does not. :param cmdname: The bare name of the executable to search for """ # Keep an eye out for the possibility of an absolute pathname if os.path.isabs(cmdname): return cmdname # Get a list of the directories to search path = ([os.path.join(os.getcwd(), 'bin')] + os.environ['PATH'].split(os.pathsep)) # Search through each in turn for elem in path: full_path = os.path.join(elem, cmdname) if os.access(full_path, os.X_OK): return full_path # No dice... return None def get_unused_port(): """ Returns an unused port on localhost. """ port, s = get_unused_port_and_socket() s.close() return port def get_unused_port_and_socket(): """ Returns an unused port on localhost and the open socket from which it was created. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('localhost', 0)) addr, port = s.getsockname() return (port, s) def xattr_writes_supported(path): """ Returns True if the we can write a file to the supplied path and subsequently write a xattr to that file. """ try: import xattr except ImportError: return False def set_xattr(path, key, value): xattr.setxattr(path, "user.%s" % key, value) # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs fake_filepath = os.path.join(path, 'testing-checkme') result = True with open(fake_filepath, 'wb') as fake_file: fake_file.write(b"XXX") fake_file.flush() try: set_xattr(fake_filepath, 'hits', b'1') except IOError as e: if e.errno == errno.EOPNOTSUPP: result = False else: # Cleanup after ourselves... 
if os.path.exists(fake_filepath): os.unlink(fake_filepath) return result def minimal_headers(name, public=True): headers = { 'Content-Type': 'application/octet-stream', 'X-Image-Meta-Name': name, 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', } if public: headers['X-Image-Meta-Is-Public'] = 'True' return headers def minimal_add_command(port, name, suffix='', public=True): visibility = 'is_public=True' if public else '' return ("bin/glance --port=%d add %s" " disk_format=raw container_format=ovf" " name=%s %s" % (port, visibility, name, suffix)) def start_http_server(image_id, image_data): def _get_http_handler_class(fixture): class StaticHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-Length', str(len(fixture))) self.end_headers() self.wfile.write(fixture) return def do_HEAD(self): # reserve non_existing_image_path for the cases where we expect # 404 from the server if 'non_existing_image_path' in self.path: self.send_response(404) else: self.send_response(200) self.send_header('Content-Length', str(len(fixture))) self.end_headers() return def log_message(self, *args, **kwargs): # Override this method to prevent debug output from going # to stderr during testing return return StaticHTTPRequestHandler server_address = ('127.0.0.1', 0) handler_class = _get_http_handler_class(image_data) httpd = BaseHTTPServer.HTTPServer(server_address, handler_class) port = httpd.socket.getsockname()[1] pid = os.fork() if pid == 0: httpd.serve_forever() else: return pid, port class RegistryAPIMixIn(object): def create_fixtures(self): for fixture in self.FIXTURES: db_api.image_create(self.context, fixture) with open(os.path.join(self.test_dir, fixture['id']), 'wb') as image: image.write(b"chunk00000remainder") def destroy_fixtures(self): db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def get_fixture(self, **kwargs): fixture = 
{'name': 'fake public image', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'size': 20, 'checksum': None} fixture.update(kwargs) return fixture def get_minimal_fixture(self, **kwargs): fixture = {'name': 'fake public image', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf'} fixture.update(kwargs) return fixture def get_extra_fixture(self, id, name, **kwargs): created_at = kwargs.pop('created_at', timeutils.utcnow()) updated_at = kwargs.pop('updated_at', created_at) return self.get_fixture( id=id, name=name, deleted=False, deleted_at=None, created_at=created_at, updated_at=updated_at, **kwargs) def get_api_response_ext(self, http_resp, url='/images', headers=None, body=None, method=None, api=None, content_type=None): if api is None: api = self.api if headers is None: headers = {} req = webob.Request.blank(url) for k, v in six.iteritems(headers): req.headers[k] = v if method: req.method = method if body: req.body = body if content_type == 'json': req.content_type = 'application/json' elif content_type == 'octet': req.content_type = 'application/octet-stream' res = req.get_response(api) self.assertEqual(res.status_int, http_resp) return res def assertEqualImages(self, res, uuids, key='images', unjsonify=True): images = jsonutils.loads(res.body)[key] if unjsonify else res self.assertEqual(len(images), len(uuids)) for i, value in enumerate(uuids): self.assertEqual(images[i]['id'], value) class FakeAuthMiddleware(wsgi.Middleware): def __init__(self, app, is_admin=False): super(FakeAuthMiddleware, self).__init__(app) self.is_admin = is_admin def process_request(self, req): auth_token = req.headers.get('X-Auth-Token') user = None tenant = None roles = [] if auth_token: user, tenant, role = auth_token.split(':') if tenant.lower() == 'none': tenant = None roles = [role] req.headers['X-User-Id'] = user req.headers['X-Tenant-Id'] = tenant req.headers['X-Roles'] = role req.headers['X-Identity-Status'] = 'Confirmed' 
kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': self.is_admin, 'auth_token': auth_token, } req.context = context.RequestContext(**kwargs) class FakeHTTPResponse(object): def __init__(self, status=200, headers=None, data=None, *args, **kwargs): data = data or b'I am a teapot, short and stout\n' self.data = six.BytesIO(data) self.read = self.data.read self.status = status self.headers = headers or {'content-length': len(data)} def getheader(self, name, default=None): return self.headers.get(name.lower(), default) def getheaders(self): return self.headers or {} def read(self, amt): self.data.read(amt) class Httplib2WsgiAdapter(object): def __init__(self, app): self.app = app def request(self, uri, method="GET", body=None, headers=None): req = webob.Request.blank(uri, method=method, headers=headers) req.body = body resp = req.get_response(self.app) return Httplib2WebobResponse(resp), resp.body class Httplib2WebobResponse(object): def __init__(self, webob_resp): self.webob_resp = webob_resp @property def status(self): return self.webob_resp.status_code def __getitem__(self, key): return self.webob_resp.headers[key] def get(self, key): return self.webob_resp.headers[key] @property def allow(self): return self.webob_resp.allow @allow.setter def allow(self, allowed): if type(allowed) is not str: raise TypeError('Allow header should be a str') self.webob_resp.allow = allowed class HttplibWsgiAdapter(object): def __init__(self, app): self.app = app self.req = None def request(self, method, url, body=None, headers=None): if headers is None: headers = {} self.req = webob.Request.blank(url, method=method, headers=headers) self.req.body = body def getresponse(self): response = self.req.get_response(self.app) return FakeHTTPResponse(response.status_code, response.headers, response.body) glance-12.0.0/glance/tests/__init__.py0000664000567000056710000000225512701407047020676 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All 
Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance.cmd as glance_cmd glance_cmd.fix_greendns_ipv6() # See http://code.google.com/p/python-nose/issues/detail?id=373 # The code below enables tests to work with i18n _() blocks import six.moves.builtins as __builtin__ setattr(__builtin__, '_', lambda x: x) # Set up logging to output debugging import logging logger = logging.getLogger() hdlr = logging.FileHandler('run_tests.log', 'w') formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) glance-12.0.0/glance/tests/test_hacking.py0000664000567000056710000001337612701407047021610 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.hacking import checks from glance.tests import utils class HackingTestCase(utils.BaseTestCase): def test_assert_true_instance(self): self.assertEqual(1, len(list(checks.assert_true_instance( "self.assertTrue(isinstance(e, " "exception.BuildAbortException))")))) self.assertEqual( 0, len(list(checks.assert_true_instance("self.assertTrue()")))) def test_assert_equal_type(self): self.assertEqual(1, len(list(checks.assert_equal_type( "self.assertEqual(type(als['QuicAssist']), list)")))) self.assertEqual( 0, len(list(checks.assert_equal_type("self.assertTrue()")))) def test_assert_equal_none(self): self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(A, None)")))) self.assertEqual(1, len(list(checks.assert_equal_none( "self.assertEqual(None, A)")))) self.assertEqual( 0, len(list(checks.assert_equal_none("self.assertIsNone()")))) def test_no_translate_debug_logs(self): self.assertEqual(1, len(list(checks.no_translate_debug_logs( "LOG.debug(_('foo'))", "glance/store/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.debug('foo')", "glance/store/foo.py")))) self.assertEqual(0, len(list(checks.no_translate_debug_logs( "LOG.info(_('foo'))", "glance/store/foo.py")))) def test_no_direct_use_of_unicode_function(self): self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( "unicode('the party dont start til the unicode walks in')")))) self.assertEqual(1, len(list(checks.no_direct_use_of_unicode_function( """unicode('something ' 'something else""")))) self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( "six.text_type('party over')")))) self.assertEqual(0, len(list(checks.no_direct_use_of_unicode_function( "not_actually_unicode('something completely different')")))) def test_no_contextlib_nested(self): self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with contextlib.nested(")))) self.assertEqual(1, len(list(checks.check_no_contextlib_nested( "with nested(")))) 
self.assertEqual(0, len(list(checks.check_no_contextlib_nested( "with foo as bar")))) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_check_python3_xrange(self): func = checks.check_python3_xrange self.assertEqual(1, len(list(func('for i in xrange(10)')))) self.assertEqual(1, len(list(func('for i in xrange (10)')))) self.assertEqual(0, len(list(func('for i in range(10)')))) self.assertEqual(0, len(list(func('for i in six.moves.range(10)')))) self.assertEqual(0, len(list(func('testxrange(10)')))) def test_dict_iteritems(self): self.assertEqual(1, len(list(checks.check_python3_no_iteritems( "obj.iteritems()")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "six.iteritems(obj)")))) self.assertEqual(0, len(list(checks.check_python3_no_iteritems( "obj.items()")))) def test_dict_iterkeys(self): self.assertEqual(1, len(list(checks.check_python3_no_iterkeys( "obj.iterkeys()")))) self.assertEqual(0, 
len(list(checks.check_python3_no_iterkeys( "six.iterkeys(obj)")))) self.assertEqual(0, len(list(checks.check_python3_no_iterkeys( "obj.keys()")))) def test_dict_itervalues(self): self.assertEqual(1, len(list(checks.check_python3_no_itervalues( "obj.itervalues()")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "six.itervalues(ob)")))) self.assertEqual(0, len(list(checks.check_python3_no_itervalues( "obj.values()")))) glance-12.0.0/glance/tests/stubs.py0000664000567000056710000001571212701407047020301 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Stubouts, mocks and fixtures for the test suite""" import os try: import sendfile SENDFILE_SUPPORTED = True except ImportError: SENDFILE_SUPPORTED = False import routes import webob from glance.api.middleware import context from glance.api.v1 import router import glance.common.client from glance.registry.api import v1 as rserver from glance.tests import utils VERBOSE = False DEBUG = False class FakeRegistryConnection(object): def __init__(self, registry=None): self.registry = registry or rserver def __call__(self, *args, **kwargs): # NOTE(flaper87): This method takes # __init__'s place in the chain. 
return self def connect(self): return True def close(self): return True def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank("/" + url.lstrip("/")) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() server = self.registry.API(mapper) # NOTE(markwash): we need to pass through context auth information if # we have it. if 'X-Auth-Token' in self.req.headers: api = utils.FakeAuthMiddleware(server) else: api = context.UnauthenticatedContextMiddleware(server) webob_res = self.req.get_response(api) return utils.FakeHTTPResponse(status=webob_res.status_int, headers=webob_res.headers, data=webob_res.body) def stub_out_registry_and_store_server(stubs, base_dir, **kwargs): """Mocks calls to 127.0.0.1 on 9191 and 9292 for testing. Done so that a real Glance server does not need to be up and running """ class FakeSocket(object): def __init__(self, *args, **kwargs): pass def fileno(self): return 42 class FakeSendFile(object): def __init__(self, req): self.req = req def sendfile(self, o, i, offset, nbytes): os.lseek(i, offset, os.SEEK_SET) prev_len = len(self.req.body) self.req.body += os.read(i, nbytes) return len(self.req.body) - prev_len class FakeGlanceConnection(object): def __init__(self, *args, **kwargs): self.sock = FakeSocket() self.stub_force_sendfile = kwargs.get('stub_force_sendfile', SENDFILE_SUPPORTED) def connect(self): return True def close(self): return True def _clean_url(self, url): # TODO(bcwaldon): Fix the hack that strips off v1 return url.replace('/v1', '', 1) if url.startswith('/v1') else url def putrequest(self, method, url): self.req = webob.Request.blank(self._clean_url(url)) if self.stub_force_sendfile: fake_sendfile = FakeSendFile(self.req) stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile) self.req.method = method def putheader(self, key, value): self.req.headers[key] = value def endheaders(self): hl = [i.lower() for i in 
self.req.headers.keys()] assert not ('content-length' in hl and 'transfer-encoding' in hl), ( 'Content-Length and Transfer-Encoding are mutually exclusive') def send(self, data): # send() is called during chunked-transfer encoding, and # data is of the form %x\r\n%s\r\n. Strip off the %x and # only write the actual data in tests. self.req.body += data.split("\r\n")[1] def request(self, method, url, body=None, headers=None): self.req = webob.Request.blank(self._clean_url(url)) self.req.method = method if headers: self.req.headers = headers if body: self.req.body = body def getresponse(self): mapper = routes.Mapper() api = context.UnauthenticatedContextMiddleware(router.API(mapper)) res = self.req.get_response(api) # httplib.Response has a read() method...fake it out def fake_reader(): return res.body setattr(res, 'read', fake_reader) return res def fake_get_connection_type(client): """Returns the proper connection type.""" DEFAULT_REGISTRY_PORT = 9191 DEFAULT_API_PORT = 9292 if (client.port == DEFAULT_API_PORT and client.host == '0.0.0.0'): return FakeGlanceConnection elif (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): rserver = kwargs.get("registry", None) return FakeRegistryConnection(registry=rserver) def fake_image_iter(self): for i in self.source.app_iter: yield i def fake_sendable(self, body): force = getattr(self, 'stub_force_sendfile', None) if force is None: return self._stub_orig_sendable(body) else: if force: assert glance.common.client.SENDFILE_SUPPORTED return force stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type) setattr(glance.common.client.BaseClient, '_stub_orig_sendable', glance.common.client.BaseClient._sendable) stubs.Set(glance.common.client.BaseClient, '_sendable', fake_sendable) def stub_out_registry_server(stubs, **kwargs): """Mocks calls to 127.0.0.1 on 9191 for testing. Done so that a real Glance Registry server does not need to be up and running. 
""" def fake_get_connection_type(client): """Returns the proper connection type.""" DEFAULT_REGISTRY_PORT = 9191 if (client.port == DEFAULT_REGISTRY_PORT and client.host == '0.0.0.0'): rserver = kwargs.pop("registry", None) return FakeRegistryConnection(registry=rserver) def fake_image_iter(self): for i in self.response.app_iter: yield i stubs.Set(glance.common.client.BaseClient, 'get_connection_type', fake_get_connection_type) glance-12.0.0/glance/tests/var/0000775000567000056710000000000012701407204017344 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/var/testserver-not-tar.ova0000664000567000056710000050104112701407047023651 0ustar jenkinsjenkins00000000000000PNG  IHDR8ybKGD IDATx{秣n#˗M@?f:ӋN]&NtiNji;ĩ/:$&mۭ7-` $0F@B}w8Gn#?~w'7Ӗ83}+!i <紾bl3tIbCOl$Iּן?2ycvy;BUfUzTJk=>Vo}diIͥOOAmߑW}LhDZ'TX6ڣ'kZX9/-_H1<p37`e\צ[dmBUCo/&Wʯu=c- XD^j*9oYUT2L[ i՞$-WzM+~i3b9t8C߱1WNB}0V;?7T%[Z#cJJήʸN**f9{ly|LO򏾸k?_L@=75I̦uĶ2,.tw`Z}Vje)DOcO=cՑWvubD cM㿱k|>` p dr捗Kצd6ckӫ9cUL[KHZ{%=iӪ\T=Ԛ!8t/{z>`{p˖\&Vo:,@৩Xh92۹ղ&IW{bDO>T}%VUijY7ΞZ[m[˜?~w|:l8>&}i)~u|)JrHՑi%U휣|%_N*O ECiY{{.+ROTz-6Oo}o,L@$wl:ru5 cxq4&CjMK;+Iz'?bDSgWISkzPRK懞\4VzoWjڳ0~SO}}D:[ù +cĶ2,.ƁRD?iIͥ],}H[;T[='Y[gů_FgSxiȘBo{v=,p֖ &%}i)~K+qWC U/sj$7'9IOJ7myU]=TʕC@OwCý_޺k:D%{7o5+Lfj=GMT+ $$IxDnL#zU*ϴԇ{Cjz~V $ݴiվ6&zRrE%ύT.*$^^U6L~2&/*u`tvsҵ| *ƖI^$&ynL\乡Ъ>0ّ_UWU/!/T;Rc/lH@ݱu8Oͦ\Z'm6ԱոCBտ}`hE~%yi*s=5ECՓvmpO*zwhz^K;wOy<!;n\ʚ4KKM8}6\q^䅡?IB.V=Mw"ޞ7$m$m;w<3ˊ/a8__ UVk ^{iRDUI. @M27Vޫ]7PRLTʴ2CG":tMM:uĶ2,rH%ӡsCnɡ.\kk bO^-yaz&Vߞj7SnJ^VzupHm;w{BSF%ǥkd2V1Jwш>&j+@O|'{{eo!?-ӪS豈^1PO C[z. 
wB@Snٲt%6\j$8c,L]2T^RDoi?.E$?=ұ\:T=VYU~(CCjV}Ƿwp'o_1&擾?:Oc%ma!#zUeUnJrQ#s\!\>z$1WcJgv`Onݵﴤ| sY1c5 cxxYzfL.jZ.JɳUٔňRKWɖ^W>:TԑizƴWvחvEt8=>u>o8߰?|*ibdӅqCWEԿ޶$ʏlJwH;!|i*UO>f=ա=_xj+O@D%{7o$}\ |2M~y+qxGC{sIK^R}s"3IKV~p}rܝʖiՓiB\HԮI_?-[VZ8py UT}K+0~fxʮiK-$VՍIkivTG1'E||HV >98MޝEt8e .gq W?Oc%qkט\9TV%IG̷W'S~Pш>M>blWDt8u 3ܱu{0j>iIͥErW89M}9DO 9џHesՏqz'OL?>VZUyeZ{))H@-_;L.+lZ沸9X%+W,T=8d\'P/I8Lfj=GMކCM^Hjn?\Z.Mrp<^ǒ[M5I}z~yjf2=C/^m۰N pztӦU&d̦uK['8Vm_|dZvt^^uUҮLrhZy-Fԏ\3Tg{o*izܡz+W":tNJڽ7R|JT+qT1OӴ?K$POrdH'^Urш>T=<[MvW}+O@r֭3+YWp|^m.6\jUM+L-E$zϕI>daHw43\;TbDm!pP=+K@ ekeI_Zxmz8pcC5cڞ\$-6"-,+T%z1vH}WΟ~dWͿC="t3ˊ/~JW/q+qk"z%w֘ˏFpO%ձi!uwUn^zgz:2v`|fRD!ݴiվ6&zR<̦e.wOU?VVۓ,E]\ZUIcJ~9i6>Y>z4MC*i_~W|K;vY#G@8ETݼq.}\6}8Xi庡ڏ+|ƱͶkcJ>jyΤn ~qVN.=-+-J<5jsi5٤RV&Y RcLjTreqoч6>٪}tj%z?zJɣ":\:tǍ/XY3&}i)~u|)JLp,O=^9_J2|6-Tnn;+57MSGz{tXn[>{hYOg8߱15+qW3PydZuPy&iK׹\dNCrWU^7 }UuBʘ~`mtaUD5|cqܤVMfSxSM\9j|wZ#zK$^m1WmOyl멭Ǯs﹯Z]:Z{80|ADܲe[{˦VMI]3n$ٳz>ؒ~V(5T=T}rzwzYY5V+*wg{z!yyl0Ԯv9G}eYOc:pڸ Op$i9o~X;ozBӕ>IJ=94&%ڈޖ"zիkw_{oS7\}>_U$ɓcsI^Xڹ\K}3IX{ZRucwg1R=}Xmk] {ʆ!Xuxlk- {nȟKoU򗒬x+yjYWM9":'ۘzxڳyڮ|(I[$Փz$G#I>1oնVrwZH^轎 SkW뿱}e= )zk&&u+N+UowW͕oִͤgezxZ>ﴴvJV=U-yi夭$gwfL=i~LU'W%p>5YnO=c#ʋCL3ϼ݊~o>Oa,~]?-v̖;iyKr~iV/ʫ+ֲަIr餵oWuUٝ.hkR'\Z'VUuzpgΤԪHڧ<҅L&Yg8phY 18iLuW֒Kr[|mE\&-6niɹ+uDO;ѓ%hyk+$&(ɡlRD;/>LMڧ[pkPi9/8)شM~n;o2L'U; Iz/Te[UcI1zL=V剪3I2&wV$1y(_+w끞e'熴ck߸.d?8i/}vg+q|+7$?{oIؓ? 
l*ؓ#Hkm} ICI6%zk@ZNoI;l$svϥ|*-miZpoYq r%_DA@SugZ,JWڷZˆ^ Iv#z[Uٓ#,LZ5?~Qm1ߟ䆤–"GcICYg~i,4&$!9 #FfFJQWŮJ.U."IM* ځϒ$DwYM{i}3{fffff?9{7%$Csg5LzwiEUGt3333ل=}3Gp%3Z0ؚ ))x^O zDh`o= ߦOt333@| _%g@-HtQ`5*"vf8!>T!Ӎ{_ADnDDE6?Z+% u#HxHbR|N.Rk'_Gt3333)ffffS7O44cxʡDG_g|h&yZ~(󏀡G"}ْ]ijt^ P-GP5~ǔDϐ<g IDG2It8LK)usvH' 1Ү$:@@ $Ru!J#њrd]O4WgF\@߁/WoO_43333r@733ظq]7.WDgnxBbɶsOOǡG",AI[lgfsfffff`@.%FtZh@Iѐv&|*`sJ݅x+v|WsD73333/#sq%@M'Bas%N꺈kv V2qWtL33ADm6I90S es|%D5BOJIKGh-5Ԏ/Kmӧq@733/7mG]`SgC,dWDoPp;>j.L:aAPni,yVՉ/z>D йpD7333H@jȑIt`>O]z5 ;Utm< z x%`3=&J337̮1-O&2߃j4|>a?]2)^7vppp&fff Z@;5 qТAV];>A8Ngj}S$+0 h%ف#!B$4"=y_#[A驄$xTʧݏ8$[{Qj]/Oչsgg>> ]M5&Ηu1&r\M-{6"z3YDW'P"8nffff7HDX@$ͧa D@#n}C$w| 3TC |fffffރK\6Ҷ៛$|cيM2yxC S`k'̓<}.֝3ݟ芐 +AC䏦fffffRB+rܗ)A>rcJK?7Ͻ1iffffv39ٴ‚[vXԙlr nJ(5!i5c#:`Wx^L6N$Ѵ#"mM5UщwK#gEo{+%$,lllH餒5RW'Ɵ_o橙K3333̦7*X7!UC4":J]kgDE璯DBDdn)]C]Ģ6hU #:9!>03333{O: :܍CL:p`NDם%$Ւ^܍8 gƙM$fff6e_^73y $W' i!Pgjp$zQlFt$,Ic# ]Ä6d-t#zU%M,b∎X툓#(fffvCjݺ;u`.h ߗX@aR?@sV&csѯDO&z=tmDOu/>D~~u0mmӅCךRDEt=,qD73332PݙyW$Y}~#:C4*d.R(=͏IB>|nAfffvC_^86ɧ$GZzJ1e2pD'532Ól80ѩD @tvx7 #:+=@ұx 3333D0P y'WXɟ2CV#zt"JI2scJ%婔2ʂ/}3{fffff/t333,\[LzlDoì  e/84GЗAR[y.2ٙ D?aE!!&M󅬑Mm)VaWA7"īL5Ed Z%ͿO4Bdf l>VR8y*h_nfff݁EQ+BwgfHхfY8,KOtPb:{L?#!i"zՠ|FWD?l&5bӉQ;|jiIz5#4\CWLuH^$467ɢ!,ď6IyJR4_7Ξ=6wiffffp@733I}y:%VNH?Bd9?% "m1F:hAC1B"UwAK|Z-՗CTٓ6iW":ؗřZIt3333V|AkkIw6ɟ,jSx!BA}J)Piģ_Z64="ޤD/ʹˁ^ (t&Яhkh8,Ht.tv*zDeDeU TWVQ`UD#:hhs7333M:Xv>!65@w'l|*WR9_Ф :hRRl/-^ڷ<{to=q@733Iy##I@|":L.hDHB C:}IW2{݉v܁A]oB љz'aU&K,bj}IJl}e# ŪL=Dtr^'&ОI:IqW'3fV x5P4)j^Gl$uD*S~ycXǻR!p)ۄ>KրύYډ"V rѮ]R>2ޔz ;>! 
+co_^M'<652s0a^պnb] ZUE+jZ..Q E$BQ)UP#D~W4(Mf\n_PS/;+$I7աd4MCۿz3rfffffM蟬Y/%~?^ m\Ot/0 X~Ot;"ICtw)=GF:pWs씷b0w5 .C X6A *Цs#b4_>"**VVWTR P%\?ywpf$\:?hJv^&k|2!}8P`D:;=pD\J<pB]<)i[=#t'ӕ=Q]y".'98d 4.W.B 콐5QA=E2',#!*ZuM_.zD  *jA:?Co]zSg_ւ60dC4ѿ5MMkkWǒ|Δ6H[ 9Eqx@Oӥp:ac籉#HЍ"wFA#zh{؈F~^`GD)xt}N5<"ڐwMm+t}f',$O**ꨩKԥuVu:DwtTynɼxꝷw/^j&z?鵙-!_[;pFНy!m՘сUp2cz&t" =/tﮍ;ш.QI#gG]c"T kr/umcn't$9Z1n"P,7,-*p-9Ͱ*jEҺ\TQTQD6Fgs^|PvonffffjfffSv^]p1w]}@p6"O }gD̦џ{:r蟢㯏bڈDt}wpmDJly4x&GK0 :kZyGt%t#:zz3333{TJ*V]KTJEZݲS7ЉKo/K^D333Ov* z6: mny\S)=> 4OVnҾoo_?鵘nfff=}uV[.ؚI0&P %,MY:-.zq8 =SzNT<%0@‰Ԧv+Xy(hgLg8drREсEjn[p x`op&M/pGt333pfJ7Nr.2&?bx 9#]+HTqLDt^Rk#:E1 .NFC$^dgZZn!6lRZ#*]KhZb)qP n?IRZ_eGՕ+ߧN >z=$: rmϯNGz%](x&k"vl~>kDt`1b{[{K!"St*g-@gff-ꪦ:,qIEZ>H۾fzffff[̬;4~c Tb;$f4?<Թ1z5] Bk3D9OU9`)7(tHۻ;~W{GnF\P{qǘ?Vp̤0lm&荎ZQPnffTEMTQօZe(JQPJQT"PV:97JU[_gzffffYO_]9}&z!w "nWm2'% ":hEݿ{QrB kx {Dt]CEa#gD?ynLKУ7:7I#ED7339LPl)qCU6ҿ>k1333jfff=(4E ԇC7çp5jLD iKEpD8W]coY'a"Gtg1gOFu]"#CpDM'sI!@^n8ǛX`wfffS@_5M;P]*BD! RfP]7/^酘Gc533~kӒy;O'QEr.pD/3yCxܓGttqg5*X'zOk\Dȃɣ!f!-{El W~#)Ds-'S1[1&ܖpl VCff@-꺦/ZhSIYJRBfjɏVn{|L/>Z nι\3Eя3ȑ.StH'+>yGgKE@VhFs?]ӈ'qpD}|DG\,p@׻>*5="znoĘ?J#r=!eCCФA'p[Ԫ<%n$8aafffffff֭+gz}NDэ":| vDBJ8[IIwwWF'ڑbOa4 4D=>5.U> N y])w{NMLkB7Zٌ꣨WZ+U}rRz:3i`RDjG2{4@d$k#_%R,F>/](KA,hۘs5:NgvnDGкt=p7c"z!9Vljst:xVcu##L<]w"3&$Lc+ي꒧}tlW23333zk,D͛v)Uk#,ϻB#ze.x)%B46Kǀ͝ыR. ]lvR/jlD4L=E:N7o ZUuTh]jRe(@u*UIq3`)Wl?u٭z݁E f;EHтF#pIt8RI%xk#bb }JkO7 U ÓB]##ro8Q6"z%GD΅t:BO Nюa&!nffDg ĕHuHOه+>~酘٭zz/ lTB1F} +^GOR! 
G|^+^MKz;p 'Ӊ ֌Bn :h ަ˕x" 7:SڙjffV,VW߉JTtJ"`f?єWWl10333[lfff=UU O' enF|4Ezr8𴹴hxJ.\:%ЉEzUpo##b+ 㚈.шp*G5}?W5[ODpRvDoēC $o4g$wL5fJ>GIew=9\~]RgwE mQ7#ޮ ΏGZ,1ۻ jt2J׻ws2BW#r{IwDnȟĕZ|_7|CǙbD?ӓff3-ꨩ2TѺXjJ(JFPnf6~fzffffv̬ܰ|VՙD4gBl3lpwDdNItpDO^K#z"yL%^{An/jD4Cdp:|q^יT:Sff7xy?zˆ_ϗ虤؈:Q,v]p9 G];-0{LDK_ǻ+9\I:秇_Kۻ_ ]s:Gt]2,H_} 3^fQ~'KE(DA}Ye;333338YOz&=jN^o%P/w GF72W3n]p&ڪῃ+GCF" OdlD]5۹$p{ Y#čަB~D=>̚M#Gt3ixJWZ+U=%nff3)U[Ln=fffo *c %Tj`lD/`=>`MsHm:I# oXϸRKӍ*Dvց#3/Z ,>ޮB;"83C,C=䲆oKL;vr/Dv櫂{%#GEA}Q5uպXR E Yff!֪ϛu٭_YOj@S鵛Z) )snD'->v\dmR' ,݈^sx%5 Vbd28\L)'Gc؍q?^ows& #lJt>ipaf3Sffb>ٟ]u٭z+Wxf^^dSq.aC+2kTmM2QOҪfԪRW-y%.IJįO$It$I=GwF_fЮ`co5GVN D'Igj}$)^-^;b.GVy:mN+oƼ^u䗀~SW)j:{hDO~ֆ{$TS;#S$}wn"$I%I@s*:MX?%^DsbnJk):LDF<>AFu8ZEN\XռAxcu涔| ?k'6^.әI|5Ft]):2YO]4%Qa$I7¶߹A%Ib@$I}cǎykn-1TG/uo_у`[I:i" 켈fv{R:m {k8PǍG~^]l-:?2UU1QM29YOoEiMuhEVU0QՅC$-ʶߙ$Ix=K$ 4C ";(OGO#܀^^_Mw7̻/Ǒ>&YLUUUѪ&luT"GVIf@$I"0KDoz{ q6lMaЉU^0눃EVa}F7lxNDNבAaL>@kEt:/`Y/2򯭎^MɝL«SwFSx+2QM\D픸$IZl?h@$Ip%I@z/R1u\KepΜJ:g[*  '[}g*Gu0Dlѓ<9Ud*1F*z*~<~VG;(UV%oU|9C#zkD9):j+h]qJ\$\v;kGI$,tI4П^ǧ8\5Z8RW0{&:۪23{;"dw*Gue:SWE3U}t]Gdg8Q3wFz> "z?:ם;aǞUċ=ॉ)n"Ogq^W %T͝y; hULML2-%I]$ICg@$I]:Ñ&T׊ c@D*":a:Ձ|=DNWpht:"f"x`U0$kPD:"z|Oҙn .":xkہUjM9%.I4.o7K$i $i?/EGt$SFtB-QUE7"sB}_*3%|t5 ٺ|:31Mhҙτ<ҙpḞM|!ח~̯5+@Ȇ}puO{ '%&'&wlߴ㖻7ܵe$I4ym/I$ ]$ TekE&Mf.&G3E-d+98xl6`]C<8cs=^{EGg#:L5i΄;>g_DOLFR: "zng}yPDREu~离{7ظp.Iez.I`@$I3ЯdWE(4-;L&?f&xL*kt"چx0b;p Wَ}$f GB>@WzD8pc3D;2yc&O߿e up.IeK$i%I@+m ~ [{'4\D\`;ܓ-'څN,32edFtt#zXFLɺsFlDogRxy3I.|$ڸ5ok۹$I$I]$ TVz5t(0ћyI_DEIœYt"T_sοЎ<\D/s48}]x "z)3 ُ u׵M]ϳ$IV(@$Ib0K\}6p_H'g6l*~P2$wy=';݈?.wKF`_CD'O4M6 `[UiEюwVû#$IJ$i%I@5+~ ~js#zC>\X ?ND/=Dd%FnDOM@C-i^{khgy*nO љj(S%;+O7gd)wy{#z/!/$L漈D^Kmw&@0v枾 >d>WEx@/w>VY:Tӟ߼mݚzb?3I$idV[~%Id $iju^Z__,m6)<Ftѫ؛J+M;^>y3yɫ#zg=sd>ڷFJdC3ѓ=k#m{rmkj~\$I%NK$i $i&{JRTxu$_Ouz=لHi2I<0Qqwmɯ_vDdڰ=ِ|$uAD}ɵc$IF~.I`@$IUVk@XWu#z^=>;ދ$-uM?gw{-|*/"zžv"z?ϔ;/y " 1;M{=K>YӋUNڛD_ۚs!I$t]$I.I @g2IM?|I>;t=6<:|I"zCu>ŶND|:s٣|m6ߵ'M$IFX%I4|tI4MVdMFo6#%nD)E$IZ-%I4|tI4Pzͨ|&3d6&Ͼ^`oD'bf9N7gޙmK$!$ܳn$IҪ/,$IZ 
tI476WG]x6۲bg#z&@x['+_I$is]$IWIjْT,ѻ%׋ |< '$!uƵKP$I$I$i0K;"~VQʫ"z?%W|`~DOxє[&||ۤ\$It]$Ig@$IU> Qr$\DόI$h %A۷ݴe /I$$I$ ]$ T<}X $ voߗY )ɋ}R.^$I[a@$I%I@NUTɾDH 7_E,O mړkk$IP$IZtI4Pڀ>\Q%_(\ ٻ}UDv7YNgw}ĚMKdI$iY%I4ttI4PU"HK^E to[\[׬\K$Ic*$I΀.Ijx>~}D|>3_޼w"r%_$I$I$&tI4P |B漈}{ ^XW)$Iz_Y$I $ij"2oY8 n[fj4$IU$I΀.IJ'ЗvA ;/ojMy$Iԧ~$Iɀ.I  x>,FPf'ѷLG@I$i 9.I`@$I[/'ɒ|وsI~`$IU"L$I:$I Ws>֐nx"|&I$ g%I+I4F*x.3dg;p[F,I$I$IZ $i 'G'LJ\WחG&I$ix$I]$ UไB~}.$I n.I3K@ b*^$I4v3%I4|tI4PBE<{^$I4v" $I:$I(mkZ^$I$I$tI4#)>$I$I1K$i $ipY$IX $I΀.I4,(I$I$IK€.IJ 1!I$-YFI$@tI4Pm%I$p$I%I@#|.$I $I:$I tI$Ic- $I>$I(*@>$I|Q $I>$I($IX:.IE`@$Is`$I-$IZ^$I$I$I$I]$ <Ǹ(H$I$I΀.I  I$itI$ ]$ vlx]P$IZ ʒ$I:$I(}lDH$I4$I%I@=F|&$I]$Ig@$I$I'%I4|tI4P I$i!@$I"0KJVqeAI$I$IZtI4P8>6‚.I$-NK$i%I@}L$>$IBŀ.I3K@NK$I %I $i4 J$ITT$I%I$I$-GtI$ ]$ ˂$IB&$I>$I(-ET>$I$I $i Y|.$I3%I4|tI4PTq!I$ d@$I%I@cħB$IZ3%I4|tI4Px$Ip]$Ig@$I$IԞ.IE`@$I%sc'B$IȀ.I3Kn;>,(I$I$IK€.IJ I$i[M%I4ttI4PdmD%I½$I4|tI4$IYʀ.I3K*$Iq$I>$I$IȀ.I3K}|O$IgK$i%I@i>$I$I$ $i 'Ljof$IRQA$I+]$ 1S!I$--%I4ttI4PRYmDŽO$IPx$I]$iY%I€.I3K"Ͷ$IX $I:$I(IH I@I,eTI$ ]k $I)*X8Ҫ]fCzkہo1 |c׎֛^qgd|ɛ:+U{gk?<켏E$IeȀ.I>;l 3:W%IZ2SUƇ%y/hT_nΗ_>5ox8II#zu%;T Q~K<2 /P$Il $w=sj_όz=$i<$P pB™xѨw28ToF !h?(Ft輯bnA g`FYi)U-%I4|tItu]^3GD%IK*I_?%N:2Lor?UC%LNEaG$zFdo(w&<]%I$]$ݐ?8v¿j8HN\$dCu|'Cw|`{ԋ![}=0r@MgɅ[  ?B$I]$ݔG^ܿm?ȿ?HN2plR4|VwA IDATw޽zL{:g壂^FDti]$I.InG/ 3OF*2;{J) .JLzEoEFt`"}eyN _h]$Ig@$I}s}/QGAHw9ᣌxES#a y_sn61/7>#I$i0K~ 88kIҢ !g$lJ8 =C}gUѢh@$I%IP=9HGł.I7Lr!:['Gv뗓M;&gވ.zZDA.)<qU>tBzD矛_?S5i)Y%I4ttI44_}<~=?zV $]ߔ8.nxgG^%=E`}ŝ.۔L*AEĢ0!Iǀ.I~ՙ?}{׎5?z$I+JF\ȳ$'#WYA9\͸~{'oƫ^zl-jľiDF "JiX[{)JC$IZ$I.4ήGe/3%Ic,Suĉx7,y` v0m@אE$IZ$IҢF{g׎Qg9󹤑H(P@i"Qha;9|4Y/l/z2Gt zˆ݀֩!k@$I%IҢF{v"Qg2Ky3#|v&OօcT|M9԰_֡Q/Wbm=EqfJ3eh*@$I%IҢ]h?g{{^$4 y$9%.1%I$i90KEJ~pcKA7^r8.&  /%>=>tGfiEw߅ZEsѥ)QO$IV$IZ'ܵ=!QgY  >ޔxy)qiC>X}cH6z1Ќ˟oI$ tId~s׎Qgɭ)DrSXIFRq:-^5R$I%IҒDÿ; z=L;%.,9fm*ȈӑFtȈ25H$i1K%,FI+N.oV $O2eԋ2%I%IH$|ɮQge^4gJud|m :4J r|Ad&|Bѥn.I3K?oڞ<굌|MUo|)qI7e"- ѥ$I>$I?|] ĿZS6t-7iR NgǙyU/0w_ǣ^+1@5I/z#$I$$I wqkt$8?'9.o˳^$d!Q(S z1rb#Ieɀ.Iwx7q^XI@I> 
?3Kg&M$l&Q/FZJ4Ϸ$I$I+?{vd:굌C utO2HZN&^4"<]$IWz$I,?:2vVA^yro5KZfS@In"z$I2$IN ڷ$V*y oQs:GL[5Ado_/^$ݤLn#8Nr#Ϸ$IƜ]$ ݻ)e;pKl b[Vl%q V`۩!zW_KIĉzzwuhkeRp<1K$IR1K_ϡ>s]%bk%^qz59{::=namwlӣ^$}&yFN]^4nh<]$ICg@$I =<[U/2sݐ:WswZ%}v`ː(2#Zq'Qm^"h(Lg,|$_rr'oz$IZy $ifkf|r=ĆQf[Doz44"[`Dײq!L‰r<"o3S*[|Q/T$I]$- ܗ!cᖡҪ+\J:tRgD!%T<S^oS4~[FVI$F%I  ^4K+^=HPJ)E#E+PLfJa& ӥpisb3ٙ+mOseo>|8;H$I+]$-c7䑀_%=U^[R~:B$irL t#>U4%B; W1J)mOs}s33\hO3]ng>;^/_CH$i0KyZAl'9aD Y|w!I#k蝈p$RɤM2S+vf 33kd ۋܵ/N~x]$I+]$-P2?H #s](` Q/CKFht=^n^R`K+b͹fK4gLsjO//L?߈.I.IɤZ A|yر+4lhk%r힄vz!Zɤɤt.\.m.4m.Lsnf3imyYkLj8\D ilS!1Ӧ]Cڠh4@>h!/ P( m-M'čر-,(q%ej s{i[:J$Zg?Cp}]j w Xt!ќVP_*ΖHTKU}_uREp1<"RBCD,w8b|//^8ԱX t.7T֗$:ccjwr)D *}C~-bbxiqGcǧO]{ 8ͻϨ/OdGPPE~̿_#!zĥшu:SQ*ޥ0|o~0ߋ|{y'zoąWKRJZJd<MUq#Az|BtG?yp C2^Qq}  W v~pȆykT<],?: ^:XĹ #op C^ɸ>*gHEeeMTċ>1vG"s =nsB!o} GsK-۷{={׽BtnuMD^h+?9vGg̸-*8oyM7bwZ 1M抺"N8*>2v Gfx-ތݴ{;yrB q%:"^0Yo]Q;пVFV]"nzO? D`a WGěB4l]QGDd!DgTq3_~sBtLeNOVƛt&pM<5=>?vlӏwq؅2tVZYq22nϵ6^bT] .|^ CMDT\BL@^M .<(D`j!üyf{"+DjԾ9vp@_2 !:{[x8vF-pr藓q[F~g^TCѼg"q4  x툸X:3K!߼.&@W?o !:b@̴7cdsBt$@YɤVmEMW*3z!:".!:BfT] FϽ o68d]Bt`uVEBtwMwb!:GMQ}DU~XgWfpktW 5I!:GKG8dgREgG{O\AIt&*ɓ۟[wk`3h|B3"ZYBK:-"&Я]`e~VQ4ʷF# w_Sز OX9:pH?VFۅQc!c}Z3 UW_xiq]7] KƤVF;23c2.5BFkDD\!:# LgR*n HZ;#`3d䰕8ukfufd~ӿo&?+SCVNpuREα+x".~23TyV\-Ylwݟ,M#@3&G\ ?w] p|}E#뎱Ka<QQ} ]Vvq"W~tD tVDhλ#3!zNC8 u{U~o=={2'=r:"@^5v+g*<6!z7vlJb/0U뫏_x'.`U@r :U ~X :pAZ73Ld=]7yEC C00a{E[8X3{{|r!> 7?9֑tVCRW:[*ⱌ$d楯PEb8G-qn>aQ_|}O<{c2S`ho`Dt5@YFqص\N7хxi>q~.S+WfW oG~摳3D}$tVDY{cqD.g#M_..Nxa/lUĵ1į|֛~ݱX')uX /#Q7]+<%X WĊ!- 8؏㙃  kW~g}wƮ`]Tf_YZKetݮZ]Ѻ1eC 1 bX<88{q~~̅5C/ 1>N`"t ]W;_T7FS+yW_|"s_~֛=cp?@ktV`zN_#c"}y K/ijTVz=cpe xo<@{tVBr+y{-)#^u**ϝ]cpem܁NksVErs`E).TO?tw]qUQUk6!^7UԗCθNF/-7gB#wOqas:"@_ogBθNvC.OAY|Cʹ8"3?[n 8N*Ҿf, X :p3;;:]Btwb~7؅)uҘ' UܰѼy1D_]mGBtW`hnt/weTBve>xqB&/L%c X:p۵86#^ !:ήz;v!S @)8o" !:Ϊ37 fL Dž8G2TċU56vD)谦G d5KBtF]T Ч X :p9ڜX]"bJh}E?)p:#*trt^ucF#-7'c0eO3 ehj|T^B7Θ?xˍ؅LB%!:p9נ"NE<茫郷]ҞfӜg cZ;CWt5} ,y%*v#"boRh2v!cq4e19"@.GJU\_6ZUCgn.` )@k%C`%sP.mB vǃV*v*cD ]Wv|Co}cp2܁>sZ+w" 
&KY}?U}}v3wOun̮"KEDDζ\D Xe}|e hLzl.uGѹjwWvnw'~8=fٟ8f~s"cu/WB0?}~܁.`MmEUF.ŭ{a'~8]7;S}u['?u]qOƣ-qf(1|@k4諒98_&яavם?v.Nqd3<.!NE +"@( IDATJWUTFtCNDQd|ᄎwNfuv99g'~1;uyk/u̙6KiO3^> K ˈ"NFĹ85v=kfϺ<8mrT7Nvnm|l)YDn좏\F)c+K6ĥDF&ɈEu/tw2gݾf'wnvv3g]ۙg^3vǀm`8Ug*l-iiX:T@?2 D".l݋;9}'.Od}O}9]]GUt;Y*D.cKi>Z'މ5vv;l8NvɮOuu['}NߍݱT56Gv;vjjl>zDϪv߿x;]'?[;ɾOtl;3.v.coAl*"V:YQGFɈx>"nvnodɾϝ.Oun;剮]}f+LYaVSJHd\vvvnTNo<]u[Gu4Ri(s+"@.G>~ậD]7;unfv}v]7<'`&GSPgOJ_ĩӷjNv'`@V`&@ǟֺ,VCOXBy : 0b}mKX:}tiO,=$xDžX*5';Kk%6Ls'8.riu1dКg U֚A( tr4' Cӟ2{Q{NE5VE\)WF|6 )=+!@.Cy JIk6KIq!@.GsMIρM3xNWM' <ҧn6Lfcg w: )B+ҥkiBRS X :\i Rcfr] *t`4$4 VLktX`ER94T` 1.L کr!@J-4l.}8%PZʹf0 WA,wXS&Ё ӛ@}&p RyJL0v \dSCKn@`UrS1Hkp60w;#VE\fXs;a:ɰT4=NXC.6͐&ЧRВ #5ULZFrm:^AcK05CZm6#aM`Ee}P`8}:,p4'g p2KI+"@l5LBД;X:p9z0 X;ЁͲp'"t`OB:2 l^>>-?THME_f:t:p LOe ǝt8q܁*4 Ӛ0u5;qBol2LOE+cloOol2Jy,+`(ٟƮ\Nk&dK9}"\Jc(`(V @s6r5֚4E[U2t`l} 2LkMU=e:lX?t`)GOch-OF^X59L`Z\ltڳ.GypEa!h9Lr80y+@ՙ@`l2: P&ƮKjh,gtڳ (i' 2t`GvRf`U Ёra!h5~RIρMsMKr9L`4$4R|lMO\z4g ,UzPN"f4Sh:dK@f" ameMhN,U)@@cfٹaȦ滎p=L`!5'RI[Cv*`|䶓D[9h&Xrl@[6۽L'\MCc@`l2܁>V lO{N7ucKUf3pwɰ6;M2> ea9L`l 8@D^tZr5 + @h#4 l$SXG#h&X*ƮHS546о#XL`2> %1Ok&[L6RF8*jl$ aя]G,U͓ &k`  @s6R<e/ 6;МX4XZHf` c@WtڳʔNU1Q`m-Aol2ZEЁM:f@K}2ИG P7 N+g ,Upfڪt-k&F6f5~RF3!& h,6h&X4':ecCcg ,ah&rzW} + @LXhB>egCkz4g ,eS6PEMgsIke_@st`Lj:g6;3/EąkhtZ:Mr48֪Lj齏#NMrT:i+ˏ6_.8?v-:YcK},GxqZ6h|m @{t`Asѳ])Z;p;+` ,U9LjD{y7j?/]ư=:T}1\3O<񯫆 !( 楷 @s6R`y 4YOǟG#kY{t껙} $h3ӚG >Կώ]ZLОM&T9}",mX}?~_TCص-49cKvR"+cײʾƺPМX`h4<Y?B#XL`AsʨG O~RkY' KZKОX4XZexe>Ϯ~0"4v-;rmМM&ܐ%!B5r}?~ "0v-k!ƆLОXN y^?g+wGScrܕ%e&dKUpNAYsGl ѯhTОX*/GzzWD<9v-ǕinK`SZHZ~s"gǮ8&e&dK#'B Sg~~-/#fyhN,U1'"#_xl-]1ckC[ K'+f~/lxWD<8v-ǁ%uv6X`hC>ݼ(o!:dKm"S3O>yDħǮek`z4g ,n9yV}nqصLik(:dK#'" #<=;">5v-ST^A4tVA,i pCOq6;xOd|rZ&ǾƪtKU Z`h-Vz5'">1v-w6rC c@D9|p4~)">:v-SQ-D[]t47`H- ވݫΜQf+s7bgaT2 * Nhwߺu7x*ƌʼ59"#کX\-/m2"2 Z R)S56YEt̙?}0 xkyPㆪ|Cd݈.]]!lU9wt`,]N฻[߶x/DmU,2.?˓>5Z[KZrdKFa=Ʌ Q"*>ӚhO,i"5iJHWċ7v!@;XnK Lj\4Pq]E֥2 LODi @+q] q.裰Jol22ͩNAi5@Sq]UEصl5X:2ꈸYǮe NJMJ$[ivNƑCƶۑt\zhO,g>{#kdLc W,##s* xўihO\<>gMLsvfmWѯTtV&~GpBQ 
tڎ.4'@<ӗBǮe8|mGne~uBLO W+H!+Ds@ٽ WL&k"+c2uiXjО">WbUkYWkh'+q]ѿ!Z堷 @s6GͶ8"~wZ֑^3 ו|H{*Mᇟ~G"wƮ`$E їY uz4g 4އ~nokG"ǮeNiʠed\_) bPМh|~v"c, #xfBj֏@s?fBFkYN8d4V9:?. !"k9pe\_)(МX{z꥽3Ʈxlc )DY' Y*+uSOt`/Ǯ{Ir{g."_1$Q93`}꩗v+_] qzz:"MDӚ;hO 8ٳONxDӱk9ni'^`s~W|ې@M!Dxæ44W&hO 82N_G/]˱R4/=[͙@W!3 ѣ62D2Ӟl2#u?2v- ~6F?52>W|cw'z(:iGb~OD3v-ǁV3={*#;"ӳ꾯d?;+31=n)S hO Ž߿w5?صL 6s,3Tq[D|xF|ץ}Fr> 5dVA׽'#]˔NilyxsV1XF|"⁈8;v SI[`4Q;uo3*546;qFحOv}뺛3>}ɨ#k`d<1vUD<#&8FWdEoc``#TE|$.wW7*>G|׼)qCqNա5_g$d_Ǯeji{ΝP.7{.>W%"e#3>:⣳io.bih-6XL`2~2bqcg_ƮY`U'wd}}ş3"t:3~3wתּ+K7G_.@4@{g\ Jdkr)b wǙoEٌUu1sG|uugEGf6So]D~dVCoD-*~Ϩ83?ʛ#zu4QtZ3 +r;qsϽsʼ59"#ک"2byqi*u@X[g~ECQG?ץx58Vt*W? Q[Q/'/G¿lt`;߹U/=^D~<\T|a+y t7WWsC-]W!3>ѽsQǺ*My"]=1v|' ӦW)n]Ȋs|dz:3GbF֏X⩊#c/*vƮgi5\7տQw/"Q3n[,Yvon.^!o.?5 >>e}.G!zw|o"gnOCfw3|d>3d|1*Gcf'>qo#c ϟ IDATK b2j{욀6i:VwywGş+*fߺc"1|<3z22n見ߏo[T|!zw"!zƯqC0k-<E>emK}SV^Q:|_AzVLV<%ļsƑ#.> TĠ'?:>ǻ Z>hg/޲.'#""bNHób 8  xy[FD,X?Y\￯Կ ,nFKm?\Ě<ވ SeG:^h~?ʛwxZ"""""􈈈M X$8Di2 A?o߮q(zX j k]y$By/=ُ=]Ջ}ŏ]j7Qѵyֺ&NE Scpkf>_nn>+GoFVzDDD̩4W=OkM٠%GĂ;{G)-f+Z޷~:٢aDFޠ7;v#})VЃ xϺ zg_tMSxi(e^'j"MS̨#:B-/4[wc[Q)ZYwz-􈈈Sy _].ךc~Q߾uh{zV6[(k>6>܊}]]屦xx5voЖ9Pk+/?ڃu^þ5ծFz6y  }o+ o n:<}VN`-R"""""~S 1'R9Ϗ`k#buƮ+'N'~Onٿ4d}R5=xh{mMa]g5!y6Y?Vց^j?C[Gx;)(:]D'M3 ])?.F7 xw@7rsXـ%*lNdU{G~wkM$GDDĜ&OYt;۹_tV+""7uXĉboP)7 tp85`zi練<ȩ5ĕ哪!꾼5ASy'ك--(h}Qj}ibATtuSxV63P6WtwthP&wʨ#:wUD? 
ſzf2%>ߘnkGQ/%""""J@9i|ѯls&$8T{D|}?Y{7C]ZnBZ^t `f&`C Hئ'RgEL0|zo s?w1[ 쯰#1W-k/OKv>C=띊ЖodOІlDZmгP6WF֮bv"qv'~ 9҈>,:44g?N:χnew_;z-_EzDDDOǍ{ dR2bOx^?Z"""""~ps7os3\s^F\Bhށ=/L>)ΊG_jS@V065>c|b8i|Pfh;0S+U'tl}]0p moPnpgrYƓﲵfKf9~ND?8Tl@ kovA[e03%>8~hsY8_46txbPU^SF&`3qpgxm]ew;jxX ~'mktSVW4>W6ީo6Epʻ1]흆)>~oth;IoY7eѷ1֛k~$ז 8,ڲGFzfݺv]as*8zGV_'_ŕ̈Wp-A4b#jT|wxPk<ӆ5˫8VVItSe}ecj4uDDDDD%1'0,!_sze koSsr:Gt~N_  A_,z%;fKl}Z\"Othw5;J!w-bzhh?SᗥrK~XFSTvpȣ:2Wj;?mgk8WzDDDImѿ٦M3sJ-߄?#]C */ˬJ[}oE[_FܳtD0w旂qgn:u3Fc_ѵu8u7\sÍ}<”Ѥ^Y?3HM5ք"T&{Èx;-wk͍asOkɆRvf_.}DDDDD@A/n`q/aZ돍zq)/uݣ^FW1}TV@6})q>6KQ}&>%q\u{Cpst@vjao7<ڙ\wWzPgԭ-~ tWK-Us{rc=i4UISwj@}mˈ7G1v #ZO@yQ#.O?ٹn1Kđ(gB[31qX75zLp`Vt֮lD^_7Wfž]ԙ6n̓NOUk0lųo)eV^'rG|csѭDžcD qǖ|S?:n+2M '#~kÈ,>o%0/=?[kݮLvTdWjůt9VL #6-vTDO0LW3QaD?('i_={"zZ|ƨ|Ss7mI49zo Ҭxi gMS!4ۦ+2%Psg#=s]c q 3cx e3"'[tm̮ٻ*Lpj ::XbAM} Þj[YgW%c>|Q^/g=r/zqy7D1CDUײ|,=1uŵg+n.˰ϙgEc!s->&Ό:6VɁ̈.+ZeFt3mTk)V݋W]Ķj`0؊u*Ç]U~f+{fξxRO#WL5#oި|3sѭ7;鎑ܿ_Zv-3ǖm{|눯PnX{#6[)T\gf3u'X`'[vgP3GDiFάB4<$z5mDFNʢZG|V_yDl3~b~LGUu9_*""""o"""bN?Xu+h|\~wٵܐQWGg֭{ nQiW)ϯzVṶ2cޞ]٘xްakS-|/F[Zk0p;݆IsվҢjo\wU}6;Y98Z >5)Is1Y> 6uMOWs-mMuÈ]vZ*ˁU [v])-L?ԙKZ ia헫Oypmi >&=]{zq7?1޶?Ogݫ8J_e7u'{6]AA?st>OM p͂g;N@{/}R1Šߑ4l[_q>$GDDĜ4|yo̠y֕:/ǟ!͊SA^0gJ<"~"ós6Zf _с;_)"zSmzIݝ|]lWjΚUSIX[[:B)Un*ңlM j)Ӳ'.=a)pD1ꖟ~/*u%GDDĜl4Y֕O0uD8zpo9W'X)'YYYf_1%~QD,4w i8Yn1`GHgEUgDvR*jgӠȓZTU7 tܮR_2&ѵL03b+"i0X$n'koף^WDDDD\Z#""bNba<.ψ8' e75%NYq hKw* ,>)bSէ}o*yތk5;>RIO"%X1S9F8:q71q%pgOIyy1["""""􈈈yIMD`DD&SsxPg-}i8[|~^֜LXS":b7;񖬭40ikDG;-;*&*n ˻ǍeyQ}uE<]+4pC7"ݨ츘q}fkh1../Y$N,iJDtEhq%c 7,UGzDDDI+@B.yU+"2UR 4譽/}YD^w5PTΈ0[8ZmUΚIUM5xRELT<]`hy)^Rxw3u H7XETvQy1ƑNƉ#1u:ㅺD,*:HtW4G35ד[Q|HHzDDDɞ%/EDD2:TwYۨnψ;zv 3ݍ4 xG*$xh{HV mFʨ-;g#݅Z\Ed uq`q)3D`T Bc,>;Sa+.)#"""K@9 4_Uřq9"vj}V}50= Z՗ \όZ~ްV춙h;k eRw7h0yDW!G IDATm`OnTF+^msB} _8>@~.,nh RafԎI^"4V4^`k$GDDĜ@ Oѥ{&zqݨW1ZEsQѳ'm$-tWDtU}#3}IZcyf#,xZh{_㕢2Qa'f#P,xlwT^MA(ݭ) *D0I>h0ϗ|Dnhf\tK,%7}^K4.um7""""LIL~jtt`N*Қ; dWPk^.FTeʤڍCO W[\zm!{`l.~j1xSB7.,. 
H >B<׺1"""""􈈈 .ሾ@7UD oHoh6Oс[x㜈U #zt֦"Mx67 /4h'4xӵUkZpn-˝/:҂VU.~nvgn>^Ņ/KJ,۱X"iΈb81G *="""TA -O+G*[GDD\P/Bׁ[NO dv{M7 ޚ1;a @g6IO y]h6OTH?vVo`y"Zy*v"۴E Y_[w*EE3KX򘠇0^N&No('q%GDDĜ=PJ1Q/$b OwD$f ǭ+6AeEt1Gw8So־O҄ўbo/*b'fvaDgn`GOF͞uC'3ښU=ʻ%r{B;&ARB->;!NvR|ISKnN7^hzhIYiB="""""K@9b-=ϢcK#uFDD\$Z3.0c]QU0a4wD DzEܛEnCEjZxFwbm=l=d J Z1~QEzs*y"vxOA/-7zeph7ohQ!-*zbN(^@"""b~/&1藀Z"w@CD#fL.^>vVc|HߩT  v ^о""mlj= Q݌fhvmT~Y`EO-+蛖ݱпsٷ~{q3vXdXrYSU=i gDDDDD\#""uDPkR""55c|h+0/z|;"46~cf#z4Ft PIT#7aDgX{wsV"nݪw1ˋxǧ#z#zyWS1%GDDĜKdY,ե#*XZO|= oN~ډ8֝#"=ᵋ|h+ό-L~iD}qDtuFONGRx 4b%tA 'ѷ6#Il=SliJѦhe#E#F\գtN:~aVDDDDDĥ#gGDDĜg,c@D\EVJJekiٝ "’.rIuWEķ`{0U` }A[yS7~O_ɏåVKlh SEy@55X{UF^աV^\2/17N0"9.]XP#""bNNI^jLd MB\v$*PjflҺ[fJVw䵋hQ]5=}yD4]*c0_Ǫ xXM;P1M5=X#T̀[+<ȷW^E^m\3'ZŬgab学q%GDDĜt fx 0|2ܟahkg]ljq; Ǻ9p1ˉJ ZK aD/0;8gu:^BOpjoV<^$x7 Ttփ*>HʳFއKbOV /In'[`c"zDDDDD#""bN6Fg-|NDw``:pJx6>|ڶ|ham^"zDB X/zer0Yaw :7!EdW#ھk[xʎbOݪ`=`|l׃ ?X[ÈηZF\U,:kicѩ7lOUNFWy]zDDD\~ϴ@"EӇ1qѠvf) c3Yp|1 ^܀xC#hNϷ}| C~Zm.=}"MymE-l*֣ojÆ{ oњXV=_o,:6HD􈈈*kR= `|KU.×m~v|pV> z gE 1IsΉӅ/Ftqg(h=%?ZNEFʚ#GקҺ:us盆^am[yPe x}(b3.8/Jp ="""d|2< 0ϷN tbHDX0EYC6ZkX{z[9'1&1xvf#F~G z4h>CXU|_SxjSxS5ϕ}WtgW};4(D檎]`k?+qK@92a]3 :a_[7"JfˉB+m.lDkăڋ} P~(m@b}^:#k͓E&aC{V~𧂿rocDDDDDļqڸaѨr\/wFDDDős3,BZNoZbʉZ9g]DZ哮vǙ$GD,,yU"@Ol\tU}s??CH1?AwXp?EOzu#^`Ё7!=x3w(o{Vk*9Tg5&fg"="""""./ 1'_/c)Q/!Fxq,O#"^Tډηq–_>@gGtăfx'kUOv}Nx=/6EkPe=Na:XtqYҽ.SO sÈQ/ """'ٗtHxx[3ݨ_ux w!-oI:qm=P`mW ǛI{ RPA45pC#>m>zKb r)!}Ь?lj~_FDDDyqol3 1bvˈʺmQcozVonS9.Zڇ5g>ZFFt5w1QW{67詂9R2pCp AKM{b#"-cE܌yU`)P/='.#""*sVu*٦3fhc^9&GD,$m'ktfD:4sFt>'댈.W "^Mx/n^Z+[zF|ja汞\̳M)+ ^"xxw3c^QGDDDyq\>m?g@v''hrh"zD[OEty8>EKHb#p@p{_:~{8 Fg$飗UxUp}^(-ޝ7Uis{,Bؒ5!IH**)n+2w4obl^C͘uMیMOUt@ d&KE$BR7D\M?fw9#B)[eJ:Ea/Y:[*XP?n =5S۔#xPg!BtlWvߕ/9h(~^m AOwu}c_Z.å}%gd=3CtǖO;+>:;:++O-__x)t6ه%-%"Dw8`?7!T V4o+"TLyUNwf' `wJ-?.g^ݐ.5׵Iݵ݊:k!z(X:_H/'뵵];q )I$Q:}` K'Y,>3t[XgJ!K$BbLtl}96:`7#9ngQ.#ѱNҝ;.k!zx@7-G͕wպS/e)> 'a|~,}7H!'Cejʼnd[Xk~19P*=$Tntw,}Ol5>$[>\_N!z!:ه+}f&e?|5BO8j@5Cn7YKoL 
~CR҃"gRYH躯(tP,tn'%gdh/JKmX>N$}\(=&1KU>liiQk!xsQ!x=)mk.7u@/!:v+=}B%:vӵIB?g;}锤[[ ѽ>Dws[Ia}’>vl~W-}`>)^Vϯ>\Yϡ&E+~-tz\:.RqKH?*Jtly#@ڸ|TH޶0|Tݡ7'&b5@;lO1!:v3WjHO~3)ηH߳z}cj-[* +)$ѓ}*IGI:I/JKJTZkJ~Q%X ud!:ݏ0 t$Btq\kv<j>#髖:I?o}KoHʶz*DBOJ>{S)tWJڊkwPgR設Hҫ),NX:YO^Ν#:`#& 5+YTJ[t%ؤvEt$ѝ;Wp1`׈RFDOB]oF[wRBtg [%$qıg)q(Z~UY^]IN Igl6I+Btly #@܊qy}nכx[=|/#;aܺu*oE a[GWAv G3@'zsBRz7zDW\?rCZBzk(T|cGRS( k)Iei֝EzI(z@{E~mVT}ԶQݶ7a6o.ZI3?9fdyu;2-? !gWIɯZ$5sR?GŞTms / "}փI0+^ȯ "B~w|Wt].t! z_w^NzYY?)B?$8e Fxzzl_WIҿJZ&0{拫=>MLju۶K7 '9n/rD3cG8ZND=sɟ_ēajkWakUtfW-?/{ R|+٧tr|2*5(|1b()|HzyWe#)(*VCD`48GiæQa lG>17]/>a=asN4wϗb%Ǔz$){RqF9D/R r4z7";*ߗH> z%?eL#eΔ|1/ JqH~ydHW⥁-grB鹾$cHv<>E2*x5Uaa7w.5՞a˳ ' o嘑A 7<}&վB;"tg7\/PjYzt|n1R'^!ߔiKw O%}Rxy/I#[/;W"_:ّ VCZx9;K )--}"$<婱9U]mw+l|m^Wb Y~w ѱ>:PHzvvsУ^$`?a'yXz'7BtߓY<A%N%J[KnahG,*%ד>h=YV?%N ߕXt:>*sB>=;օA`rsbGޞR ǂ*!*Gz-9_ YN е%O tZo ɋƶ~_=4@okuTx=,!L6̆2_P,ɖR7Y]RBKC~By+[ ([wte]xXk3j IDATW+C^Nw%SG)[ 7t&;uhu ӉIa)r^}R+^- VC)7}۷9[ERWQL ͇̇UC͛˓?3n$:wkdfY21VUZ[ZL| J k!yh(l1/jC9t;wI{ӝ!!zWz/oTsr'%V:#N:YXA~C/8|ɗuD/DžSi^>fůK;]<[paUpVYoV7*6q#`pdfL=R$~ c>P*JE,$e"?}ºcNJC_j*DOH~؏Ct;ʡG,.ޖTJr I'Yz!Q&]X,- )9~ /-3Gkm zH>j֭$j7;|?PL=LJ]RH1S.~7qvqPg>e0i%eaѵ^ĉOSZ~-lW@ַ^^ҏjN]驔~N׳|"KIqXJlz~TrB9N;Hix ЫJR7RUͱқV6Q39ٶ!&LȷELM,eYBkC痊4wnxv`p5gS/i=]=e}[4+{-D}kp{(zy.II^>؏B]i`锬ZysI蘩E:fPi fy[^χmS3$8|1mW෰uTo%'G .%OR$ V(,պNOZ drD8~V!!tߒT:NzXD/I/Z@L|a6c?W$C쿳R @ y*Iv gO56-v ,BKYV#厥dJ*RT*պNY:IWsDH[ZXdQ/ZHTNaCJqpml7%=VooKY"ҕTk9޻:3[df Uu^4W-K7_mm Nhۻ1Do:}.)L=XN t9WCu_z!wwl}y\zDp5Dc[I'Yz$wRZGR86o-z L:fPV^_r No1־IoBflB䙅tҜ"׾IJoKzy\{Px;IvG 8ݑnf; IzvKnfDMmys[ٽ{} -}~=-b޾xrL %9%Z}O7m;CXK]DO8KTK=Bz+]Vıt'9~ Dc?P:ї>dN}[w0i.=lT:IuJQ!kF|Aa{9/t$.;90uc=d)$EP?7+WZcZ:BGYQ]jh=7]s-5zsoM;Ɏ 7";MV>=\ ?GkZ=uV$͕w-J!x?[zHzl $9Ӆ;f0N0b+U xx@rts[c!vT)b-DgCEqbқz@Rw`GmC֣ uW8=!zH!rX[Zu<)Ǖ{;[p?$)7t@S3o{lXX>,h׶< 3;?0ݔuD%-KC'\f鱤%uH{O;$-mōf"@5Cqxۤ=6m;RzLVл!]zbuL֣)̝ey['-u`Sڌ06Ecy\va홴b~ܡ]\?^,׵k]:^hqA~Vtb@:2*׃fp>.L77}62MBO~(>$oTXM;ߕC'R{ռ7כAyӶsasl9"Nj-D퇞/;'"p\"v QJ4*\54~Ew ޲M17q%i]o[')ʹ3nw`3Fm{mzjW ~\ ocvB 
Гn-\OZ'W2k%`bimdE)dK),يAKX\DCzjy0b96@ڏ}IRTfe˅|-)"I$%ТnPK:6 @5z]Sc}Tp^Mj7Cf%Dk,sUEąBd[>dŁJn[Zy׀}$LW =t@eTz\[^ͫf6&z}_ՎO=$!ѧ`+Uox"q!$Mց*Y z_'F>[V.H7kv^o#!48!:&bId](k]V[tw/&9t@]rWT[n]} (g,U;R&PS%^JCIw|f2<+*ĮzxV U ݹ g -O]%n*q:iXzCzk|юʸ' I<=;Umon 4mwy{0s6pZռnXVy|<֮Ȩt}sc? V\|~A` !>V%CW*q"@ Ӭ@5z[%j|T!N;F ~4bǽI~ң -lE:_X+Iq)¶;ExNɱrvh!U6,#@ӬFnʽn\%0Ux^h}Bt[wz9)3!:V;R! ɚ逥;(˺}݉ʫo^` @2lgkn٫%Bڡ' Zj%^%u3nT` ѫ`9__ LoU|վѱKY+ۃ uqTXOnwg)4()|)9V )$v,(v} qKuWAz;f%e{sKE =Wz_u W'i9.T%%[GFtD#Gcvtr\ V}^}\>j[}yc*k0c֡p/+r./@B"sNo%n+x.RҥZ)(2s uv肬;OnEs,jV^w>IP^_o^"D>(}gq^|b}( e%Ws>M魂HU%nIZ."VSD"^%*q^ + A؈Gúe7k5FCi}ث$yg 9YME+r|GwI~HzvۋzqVuCEaY:(pFxP%"@ܪzx^]9Dooda^}Nyd,0sI%BJlɾSYK'%=OX7cvUd](LT%noE w:`#f^F^U$ׇ}p2;z~\>;QFEW9kǺ(綻ؙpRVąZ)BW+JU[ZҍRJcu*q5*D͏νSqO/묒:wz̺b_("IKd*qV/b9t, ѭa5<lnoۯ9zEf[LzsV+jQֱH#}%%^y8։uv}^ +--Eg kǜŵ*Kj*:IfmT.~V1nVO* ͱ Ǡ_(s鿿}5JfL'}O:8:oeHnNf0pŠϗe[‘ P^%#tuaX^Fo ѥ!1ϛf߇{tTJ|m0\R &}t8uW/9$v}+YRB*JN+I%Z%w \ -tX^W%~U`i3/9tfi y5o$^7,@nUBVǏ w=xOoo#UJg#ltg/bd&O<9)t)ٗ J1XL)^߷ kax/`!@lIBz=ʨ:_:úq'D򞺟Aa ւ㹿],pe-8[.i8Hw%Ǚ-%_TKwN ./+zu5Ed۝"<'ŁX X;u?H1]`6ۤ=f\Uxv\} [pKto_>gu1+N+Jq']ǜzaYmW!B9).$ұlI$v,X:BفƉ$ktVr2|Xy{qm wBtœz~ >s;C>^N+KN>:$հTJ-](גu-ȅ]&{AʋG8A6U*J=%;Nۯc=zOG)cp-7W~p }ޏ*}'weg )|)9V )lI*5IZ]b}xj~ !`[m,Q!nr}Lc ѧIqv}Ⳬ`Zu-N!In$ݧŏ*q]Ni[K Ci+G^L>=haoZ P>D5[ffF0Jw'=v}b[F.B ]j̧UK7G[_9Njtiu6Ϻ!z F۸Zv[my\ .jOhapvQ!jU[mm཭P{}_iw4!@Bz5z[.WǷ͛WBzWv Bt*w,pkavWJt`u\]:~\gB86:`$Dݣʀ߫1ѫկ3}39tN4i%j}{Wv?g[SՃj^>ލ`}Զz> c C*Jc'H '`svGúeoW1m4D&Q!6vzx^]9Do|Ԙn\c߄Cl*0sݠY^_nևV{(^O{}4 с bT^ӛAt>Nj\c' wDؾ `va!zU.Ә&LGiׄ,CpFC8飪ѥ֍nlv]@؍ƅͰۚ?lnKbd/^SzwoyA?t^6*Dúsׅ{}T,CM7|v2P ߾iIt*:`Cz+ݹ@o mi{~׻ri0x^P+}nuLt*771=t~}TUz[zk22i!& {>F5݋_ ѥڼUBzn U;qqεȕs)gl29t~2*Do.ۛAzzNѯW]^|QBtUwvt~3,DW êG>,Do;B)I;W2 7|JZ у:`?j y}>* oVOҝ:78&QṴӏ/]c? 
N8 ,IsN.3&IBz]nf[ä:N[xnI՜/W$'I\W>~ymLD٤i\6fn̫м Ћj_Οŵ'&DYB3GںM9D/nU %9׋py08;Œ]t8|"Is.Z/mE mc ۂQݻ>}@:ts1ϯ}6Uzsׯ/1,u_%[.|n6wS`XR CCjqWWݶ7RR+W|wy7S\KaN]S4ҝTvtnhVח!zv7CWj*w__)=$kߵzԟ7& чuެFս{3Dmfͱϫ#+iJ߿8okS\K.\_ѕvtn6*Df룪{9XU]{hSgn^-@'׮- 1A[B*F~r#bp O%e*0s׫Gê҇u6z}e }Uy}+KNuʟ\MKŽݹϥtJѿs0 7.Djn}X^>ߕ{5˗_+Wޛ:t _>Vsq]mT Y]7~ ѥJu˯`e),@*0nJGJ  ExDW!x}X1чmV%z uR8_CJ]Y͞km]7ү˯>xʕ[}!_>>|Mj Dl:tmz=n-` g׏w/U׃_n1DttMڏXs X6`!@ִêݲ4-oVO3{ ҍ=I* օiiS49ǥntn][H=,DwD$>}}ƍޫ|y֥FCcYCtu.0{lL3/C*5&z[w3W7[ GVqkLڛ]_pʩ.ąj^8Vrfs;ҵL+ՠWYrpFӍysMjSurj*Op!,ܴM+wv7{ΰJ ]_u^U~kļRֶ HoUz3cIz[7ҁT?:\C=iϵ{L `_v7{:5.Doê۴Wj}(<٥{78}=9&)=p?8T%ka򵈯{:DPMMA ѥc7YRe[*nh/4}^ٶfUqn H<8u%B*:\@kiϵQ{s~w1,DW!zu|ۼxeXެ82v5B?)^8x*ы. }*\w?=T{딖Vr>7iϵ{2%^`"@`MQ㞏zTuiyg1ⱛB=)BϧtR,J?/F`Un.tFsz^ƺ@^]VBzF41xС=BJǫJtEmˑ.8ݵvo+:oow#w1:[oXmmcG]}XA]]zܿnȈk*+|z.픤:=N~_ػ@`뵍E֕{[qXVǐ |K}+>SH/-M_> |p.I_bJǦ=6i}cgf9܍K6B"- PDXGW(FRȔC7AE F8@qtXaӀ_/ZIJŔ2\Q5535;WObv~橫ghtuOZ&U)#pYGϯ^ƃy&^DnFD?'#RH} @D`{lp4۱7}sܛLܷ7֮ӶsVY^\,҉RJ2_\HY5OpO?շ~}`3Qƽz:^PTmw;\>"*I?SčA=*Oő"=Z"=͵=t: 85p@7Q1~һQS߳LD?Ik5M޳N7'MOsQQ)Gvc~_Ƶ~c-h(ȭA"ZóX8}7^{O=7E5vMQMUz`o2n~Z^[ڳKKJ3n!Nx-9Xg4(rgtI}vl_}T%)8^|g}7#zqfY$!]xuދp`fM'7KDo2s{-}3l3WR,NݝD3]@-lo\9p'sīIgGiv>~{;譔AD/S76ot~_B8|tfړ&շsYږ~JDoߍGRJő8}ѿu:Dw>ʛ2up8 M3'=bRDwo|#یkSqƠbџYl _mL߹re0p8 ?4cۓ"tv"u{[ߨLck]_q,o=wtvsы4-8y/K@c3«'M&ԧۧ5=F͛eyGn'|=o.ţ3] t}&2MDvZ5&=g5Lk|u"D^I[7s'Fn=zt{=Z+Vhݍx]@ K)څ?~㿜: B@j'}tb-^ymGHKeh1.pPHs/_B.i6=$zϣ {F9ܶ&ci=뭔aD9xk(Nrzv-p_&4gkں}k1>r hKKgSJ3OoV݈~FT5A^x\<``eR6ϧ(ӈ>f)"ND/ы7VR*R80 $)⿿S<`/`yk=S|Fϖp=}&^+GF]߸u}jD?}c0x{xp]@ "O}7s(I)>zmcߛxkzqki&vqcc?\#j+EN)"=.X>ǼpL3ܦN}ϓ{}"͜oDtF9xH*N]YlŢe_yy/&s=o\~$n4I""ll7?vt{}#zbHiH*1(9RGv3?+[E/^x_:wy!5KԞ4ǽߨ&Fq[ͧ+k;t=/l漑HnIDJZCQ|˯/{%0Qz3'={ݨH^JIDD8u,ۭ.oE…g/^{90+s o7ݫ ңcƭm:GfAGwR:ӈ.Fv>Ͽ'_ֿ F{ Q]FDq{ĝT76U~7]>N~MϩG,"bsd{P"zkn]+#W?'Z3VJ6sW[)=j[7eD+K n;r܎U~y/vJ@mڈ^mmګ!}dD72܍gKD[btl#絜jHG'o{NJ|!pp#R-:|"`^5*OӇGm0&k 爯v&#-"nG7)?ZQΉ")"wy "K9 6_{ƼM@aTDn>i|sv27j"~i#n%n]9'RjMxmBKkw""=yk;Z$o "Wr?};$1)7Y;kW}^U]0z]E89"vJ'҇oRgC]*"HGҿx߰E;˸5NG )2}vÉV<>z"._yۉeD󍅔N,crhSEJK\ 
vA/"z9R\L9o(sǟ{xG@gTDouMOν>|^u]]_!4D?[]݈r,b=#)_*Vry1ltluPяN~D"uAos'v_~wu_~KM@é)W{$`zs #zVC}O ы2ؼp=EVJǷ~x'";짔)r'EwPϽڼ 4}O7ۿ^O}-Qy8y}\ڏ9͈^z IɻbDEt+EsD7")Sދ[v[ݩF@mڈǛ>;Ǥsv3Ėڝ3X(~J(b=È~f̗rTl|vxP":9R7r"u\vSSo :P?чa)L'ˇGt&"\Do'9"RZ;}Hgor.]w6%GnE\y/x0t bs۲DΙۏHM{.gˆh"r%Iř2_F؈w")A귮.?k^0P5Wt?=y>3ѫxQ~qDonO;^q|ыtz=9ZT7Lo:9[v[]S&U\Lu' IDAT]:(>n&)^dEmG"V.Z*.#ND󕥔>vwg3"z)#u#~*Re%A/ZKoݞtnڈ^M۹}aHoD/NOng=VzW4o|5"N?x/LQW΋ݟbK@yըؽ>͵w#7jmDWg<Y#z(RN'עGtZi`VsD/EF;ˋ9b%"z)r3VʥJzp)`QFEj4usv?}X}\8ֈ~wVz_^X1GDQZ䫋)n8f=j8#݈F~DDnQtR.q&E]ћ4UvzۍóգaӘf+Զ#z:q̫#IE/SOesĭ_~wut`q=ޙC|v)'[gl?:.o4G<:(Fĩ=?lFD/EtrnOEꔹ)^tExj3/u{ 8Lt`"z5LM蓶sѫAqk߭>}z^ɳg^8R3s"v{aG{O2G%)qjx>.y|T@z& E>i|-ѿ+9wۏőY.#kH~ok5"V"G7WD+sF^+tXY*3_F/=CbYLћF%px^}Q۸#_qik w7gΝ{aֈ~m\=::kGĻэH܉He?\v(zW{̇jMG܋M,=YsSD$ǽ߱}^86CDc_яN~D"uAos'v;lG5PoWt j,߬d3ѷчk'EIS["ō]O?RJSe7ɯmN s"u\V/o]\~` lWu#F^ݖ)nOs&N#9ۉM oﮯʫNʼnI=jDt"":eJ O2T;GO`t`ۢ׷G<^}ADQνIߺNJbq vU"/H^ι-"z7Er}o[`OhCt4g vәчƈ5N迹{"A#BHAwNG~DDnQtR.EroXxPt`}x_[oFqS &oUޯXXޥG>tvon?T@`jeD,GD?":#-ΝK7N;t`7'ЫGE裶s'v>ɸ__c~Vz_칳/h^G3%ޭO;SVu}p__`2.Wc4|sbzujv"z"UK}?>w1_ތF6#X"rN)b9""-7r܊(Q}#&O׷xo>Ԏ;͡aD2~ycs{^s~2ǻkן^l?،^Hȹ)s٭O/ZKoݞ{t`M8]5`uڱ5ׯ?>T/(.nl+=GZ-۶V^qknDSDM)sr-诖_}?'¸L{<ŝɽq۹ھ~6|w.onnJϞ?ՈGr9XLE+02"#IElN <$EܛFnUZw+?/oU~.*oծ?o}]?||ˤxÚaב#U?=ʈ(ϴϞ?b5xqcK ǬJrr^ˑb%GRN9Ϳ>{O _᫺Q%:*=V,u1НI:js[/,^j3"z)#u#~*Re^/_n>Gغ{uwswinN)"G{O2G>%^D_-;/vދǟSSn:Gw&v"zuF}rD湅v[}u1k؉KEox;G^)"z9`ʲ׾Kw `m'}ԙݫ٫})"z=Wzu }q'owW^}A@aڈ>4>.7^2W5bz}QBGZ?{~֓ Sc K9S:e.+gA/ZKoݞ[~N~q'q~ŏLX#l="ލnDGND(E'[D_-;/vދ`'~y9YDm1.~#YoYZ:yO.?i#5Vqoa@NoF±F x'SJʢ;(Z^}m `d;}Džx=j+D_<ï[o#'?v-#{FnDSDѽSLe?a)7݈7MקѫGs#p)nު_ [qF GOG/NZOX'<@fDZmseN9zQszoޫ*؋4[7juzONMg'#9"ʧ~Ok`Z"݈׷N(:)"jyշ^,섀UFQg:g|VMp}QFoLO07MQmEyǾMN2"#IElN}[p^6*1GE,砏M1y"wyĝZD/+ ?VSkes,G^αR#z9^+ND{sy/"N#zz5j׷q.41^Ms:[z.kω(ҹwBc9G#rňo治Yf \Hl[Woƽ60Fi#pʼ)74^};G5Mק>>8:u^vDg9y?itlgGF++eY^]X]9^l^^^FyW_~jli ~M.WS=JSD#>W5a\QN?r|OiZH@Y渴f卲\\f~Sn,ܼ{}vOo7)G4GQ 4}f ,=E`^z91vUSȬkQ}tqVnɿ#OS:=sk9fΗ6s2[|i̗o{0?7o}n7=ܶbm3`t`nD5> Z믭kn4|za-gcK?|HS9b fW6s\Z2_1\^qo\۷エzKoGM7>)Z0%؏M[7Ei=LL4Wo޴S{N;N3GS/Ԉ592 
FW6X|iWVs^yP^m.ѵߺ|jg&]ckشMG]mj\D5^h>}ܮL:f@~GǾ퉅SEŢxVSEc]YF\.s be39_Z+7W76o/k7.t)>Nvwݘ\oZפ؅sxt`?&GGI[7vƄ-G^>rr76BΚ&GsfӾ%:!~@^OG~eZ;y,uI^,kg49;;]708EiQM[ׯS~8ۉn>5;ޭi|{T!b^M|ܖMY3sdtY{N{ݝY̺Z0#8h's݈ҳn>z9|7^'#Ѩ>=i|y؎iNnl?i<՞:[|/<[߈~:&A@ ~Sj^yGH+/ CH@8^}#O_~:"= 9."H55:! ζʟ{%+"?F> p.|o+r9-{={p'6b'#,"Ast>,a#xDz`O=uةc ^~%S_ ϖHg"^~'#OO#Eϕ"E15:> yz0hBd `鏿_nܠ?)2`yOlOӴ9!!򩧎n"p^ЇB@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t"B@ "t""=gOln )m>^t&LNq&"RN#t9FSn*V+w.]/}Ky~/>Z"cEGr9Fy5S戕b0xWW/sl_)'"'RY>S:"#XG7#b%^ nUv/s,L IDAT _BKӹ<x*x<眶oAq-t="noF˃V{7/}哟\h߼LNĢRJ'"#O9kq#xl7s_{jy B7m1TI28cc0yy%|TMit*SpX? ZK0F[_dDZU# ylyO!F!g`10PKLkx४1JHee--0L>@KFgWdiޡnLOCyl8#6׈72Jv=N޸qK vʫ-,ݟHw3z҆ ;Tda2x 38.[ @cnhKt͛[b~=`1 Ɓ91cb|m`z M F0 z^-ڸ_'j rӥ ~*R*])#|/`p/o"~f׮-[v^_+ffA%%Q ??E"M5U򊩎_1 /bhgm{xxm*++Z[gás|B&c̼"'*j !r ľx=UT߸c:@WU7n4kC4S ]%Wo]ߦnڤy9>Ipw<&X8+v~|sdӁ])ڛЕR oD8/!_6=xх Zs3-|O&2ӄ.V8\uąCoOs pZ&nQBe;g0 `tS' ϟtP~br3"TiiivFv _Bb;&8 Q}rȁLL%M`z2*ѵFfLLĵgOVO0rfǼMiU[8H:g A^fz1d[q:ep馃hexqnctPW:@O9x8#TЕRi!-P9== #lE*j| |M$\5)|834vEAcT]R0"1 ' WUYu#,|ގDc>J hZ=VupQK!M ƟdA}}tPW5y~u vqC-TSNN֮,l)r_Wµo/ݣU7}ik~MozL  MKG(-R⍶RJey"x:bđB!2,"qS$\#۴˄1<7s qQu_ST8?DǑ53UUH$ ?@B W%t5W1 ?"Hb-L4/Ãj NujLJ08&۸R:zrHK;`_!oq.w/n0'ۋ7QvX9eJg ȔnW)>{ڲK"@Q_PmJ#Di|qcR릍RJŀ/<1b$p{ 1ɬ<61lH*l+.ctlpVEŎ"s=gJ}zp~e89vtȲe7}h/e8}x?裗{vSr~KdII ћ7E= $p[.bp gΣhܶ㨑N_"*o͸qqq? 
Gv&ݮ[nKy:@O0; 7oRR*m\K u_j pidU||[$\.8a>k$PD a?D"JǨ+ 88[%\0Ued[(%`ĵ=noS<8#^אC+ n>$"ge{&rpIoasmNEg=$nܸ1xk"7tJ9=L3p1RsA1I剼nC pGQCiñJ)ETJ*++;竨f8 ;<"_HYٜ1IJdN1_aRRR*~I9x]8O:F^ 8 sxNfVF?xt[M8<4_Kx*yeHפ=v@MAOYOohGwV{USNNWiiӼ9>k+1Oa65'ڶ届9k rw2- *c7<"ϕRtJeOz]@P+ݓL8 _Uvvz>o̐aG %c/\4s5Bg.Cj?ԴǑ ,+Ϛ5˶&631W|Yw21ٲʤ<$^4 `9R,; ̤5yNwURY3v|uCyufZى%0%r{aʒ"t]IQG׌{%c92d[V9S:F/(V3>F'^gG@}fܸ<?+f|^q;}=>jlhP5tUas{ΐtRJJߘ(R/Sh~⩆icg?=QP1 9M GCb$x?oH*"k[q˂322k@(_/)1|_:%93j#KC+j r_;aq=J땜Ckm $wwum~,79g?ct]KWBΚL"zL)ltTʘ1r| LH--->_pns}P2kw#H&3rG!;x*k3wܴah@[sWcESS/tH*/(Ȭs3#{ӥQvtegMI02)''K:FLǎ*9d /c%T0@Xw ЕR)=9#3 W[FH>(K}_^YYyh[[ _˦$D竘 _xVk-Vb|kg`5pl_ /wŸ n&5ƍjGK7co?JG(': 3ptObbg&V[8F4\M~QЅ-63矧,ݱZ:B)l7RI< d@kSڽA피cv.tiCZڟe nJZsP(-~7΋ b5ifΙ3R:E n>{3<FIRHF q_閤CxrMJgV_PpHmA<+&I:DV+-T7wTI?#Tl>jT63~-эM]~tRJNJdF@/^50<tm͛H]ߠ++**j{hJj-[-+))H/xC4/뛸f>=j^$x4JG$1sFH$#fOAiW?1^QJ<*F<:d5yc^mD9t99t>v$hD)HVJ%s2(nI->_Z9ԋ3C5^ΊgSJ`|gmHg2ϗe붻nIbQ={vZʌOmzqzEEq ϳte[/suY0xtj |@tKU{ōo-QiP- 2ksПkBzp23[ 7lX+]RBJ2w;wtX" L:Fnomk==)j?8())<\%LFs @bt➝9cFhqNJUY#@HՌϹUK7(3nHsm&㯯x,?âh{ ߑnIBƋ#TM"tGW ]PJdЕRIcܹvnyLW Q_qt;Ů=09y~W;RI(v} \ ݒ*8e׮֥Hmc|ziiivFVQrF&=&? l_|p1PSyP:!0;zz}.ߎ8S%1c]A}<^. ҝzRwCR*)f8[%~PWVt^^^qՉIAIϖ TK|  ;!8@i JqXS_D@tK xtQaf6_՝nܘ)s`=ꦯtڂ_<鎮o;R*])eP(df_nI@\YF t `08@fR`ኊCCС#.HYDו)ȕ[xJKل>_ $=)_jƍ# nIH)Gڂ1V RMMA#[I,ݱ7zqߓPJdtn};H1,""Hn w~9eDդ(J|_xtG#2lNJv %8'dG PRR[n;v0q0KW:]U! fg!&?`~}U=@Qgtꟺ&Iwtń׽MC)ЕRV*2\# p +-zt@/W8?9)p_>+.;! t^In gΙ3,~^ N6숴}17ȕHG` UaZ;/E \ K%Fqc[VN9Iw?HG(TBJ)kٲJnfV}iBLnNv O$ [%0_$YYY[a _I#GddD:"VMg)h}j 96!}m}@ƍ83A le5{!qMMJ)fR:Y(?p0pW92쒒̮^N/J:زew;lWRRfttKd8w ϟ ]]蝻}F%0g'Hw$`)A-)ftRRJw QdV;vh8a?~!6/J7 ;h:lgJ)tNk{tS;ԧ< ^I :͈ٳg΄K/MۭYgȰ#B aÎtDBgOpwaHLk~,yNH5CH7(u P{)7ftD2Z3=#[R  ׿V/ݡnܘ)4S &Ьћ7H(TJ(TW~W9(Č&>! 
!r}Y!D;%wx<xڣ 椋\*b@ ĸN6x_v7z,X}_,-zHvvfL|3N#D+hQ ztK}`߳e(qPCb|D^NaQn %-h$BLMsCx :οpѝ0fm(_Fc o6]"bdh PT}1+I$gM:޺-/9ːkشHƿKg(T*ReeeG1xtG3i"8c²9'zp.:n8ڏ`EUUIҡ Ǻ>4K~p8tM|>a`6 Zb@ҥ~<YYf+ |<0uDD:w;w,Ր2C m9Mb8 5oMω'%O!1NdX}Ӈcw+?~m35 !` 9N۶g%bma #v248ُ8v|YE IC}6ΑHIo:>CZnoJ TJr<w8R: *coXT*++;<3 \ +ϟtK\vI>8(+cj x}F+ aHX}0B/KJJ/_J3_DÑp.,]x5܈`.*ΉD>%;r.1褝Z0J$?#p#; ١Ad :`ܪ"rƌʟywSbf|$> ;x]ԹR_]7<t_s!|L_!Ba?nw]&!#rӇ`#AgUk rDLWo4nq=+NްaAE/`Մ|` L) b$N l"o;o9lh3 9H'p,'wNBtGW |rssʽVJ))6TJ?8%~ }P?ss@t-'pztG<&_K4g8/|{$WTTbj s:MI Ay p1=$ݱ&bǡVUU \na IDAT|Of29⨩P_.[tG Zc"B\/jP(}k_X@ ' >IYwfiuu[g>-j|T"9w:rQLJBtH",)&Eu#Yiޡu1ˌ3{xIzjyc vCoNԇ8]Vvn{;~|b踝[+V:Dҍ gxNx!zAB=Ckg ቸno0/_&1Bpǡ[.\ ,yYD|߷0bw Cbf7`ƤOK+Ț 9AxtTN{5s^(hI^{)^mRqg\al9'77H(; pRwRjjǑ" smU70(xN˘q;ݾ= Ȗ9tϕR*RJ%Byy|}SoOH^ ߔpҖ-ۮfz1iUilOb09G.95W\q$Dyyi .؃@[]]𕒋 ~-v6lxpqdžU𼫥K, / ]V2X|&`yELZd|_5(oOѪSׯߚ4b-?`fќKax\yyD"Ռϙij" p|qC=L:k'9Jw|6stRJ"+| r0Oj7zM4{2@v>EJq̙3⁙Sy1 Ͻcb˗/w#`w"a-w3$B!/9% tǎwKϟ/2lpt&pbKJJ<1sN۝$̂ GJ hOʬY {!Zǜ5q >싎AzI3G(]Jbp]cv7I MߟWqz6,:xq冱cSsm_}@6ޫٟV".-nh_TWS:;Euoo'Ѿ_RW` LmnN{XJ)%ƊJ48~c3L_x+~X/ҍYHG{Svz#T-p$yG}blDۺu[vA8\=st 6;gٙ1;lK#b /)pXp*`dGyȓٛ퍭T;vhHwts[٤@)nh^f c`R}6uӦnR@ظNnk ׾ݡnU MAtA j񘋥#lPƝnUfjQ}:~揊ntb4uL;X:i] JU:@WJ|gx˸޳~U.\m= V|ճfͲ~ t'aBT/)@5ptD"͜9s0B“x*XEuufn閮ZXwr/1[R~!Xtuu έMFI@{L0NJfI揊bǜAzMJOLx's&74&ohX)o=@8 ƕ ϭxtA\>KUǹS3R*񂯔JCub'عD"[sz3SnzmN{vxH$b틪H* %ofa07mXyp8g¢>e7:=1¯#UۦG" p>{/7GKg3i:'I^{i`ON^.@sYa}µvC ?#E8cV71Dv g)oeⳋL&?Bf)ݱ a)JZisCX)e7D]4YZ@"ײ+`3fT߹cǻ߱m,|\Wmٲ|D9gH0Ol ly;'&鎮jk#Uò{,Z HtD_ՍϛZ\۸cXmnn-ްQCm_q7AJ!ot4c6vޟW2}n[i)D7]p}2Ju:@WJ%C‡;zbQUcMcx3k+d>7rQe  q*D$AEX? 
avFQ}EtypM7ֺ,|;"ie 리kZ/t Mޖni=@_3>l_f7o{H.oǕIwti8:+RJ)):@WJ%T 8>.mÊ!ڀee֯zef(zHQ@@|/Oeeǂ\@S$iw:ren㗎39ZzĈnyaٕYձ};}U@Ï/R*{8<[c?0X qg+rS೺J&kG !;>Ÿi%R@J2L?-͛[:7ϟ9ivo1q!I>@g˸m__t[~*Rp82pfIGē#êw^^Jw|%%%錾I3,X* C3|"ݲOڂ%LX5=€LG+Ltu?=eE MeVRy2L{>*TR*a***P&ݱ/zE/׀,{BpːΈd.ZxqRm k@?C)$H=ʸkȡC8O:/zOnI$I6WLӥz'otRJN|tA08"+ݱ`c} AE:&-y3m!J).tJAW8vmm?A~ cq8ϟ߆d=OUU3zkٲw`#qRrEJA/Gd >e+ảD"3`tEw@H-ܷ8X .!ⓤz^TI–cp J͏I뛞P)''nv{+RJJ#2=˗'M`Cl=%G2@gfjt۞2`tA0Ɠ<+ЙoD"3b0 PW7XsS):^qLmaαz\Ω -ݱ/f j؝J%O2`baפ;R*]])W>0.^{KKJn{f'1].1a<,X-k@x@-jkZ|1ݲ@sD" HwtJm܍'ip0#ȱkLeeet)5q DRaFc/5[7Džǡ_H7txpYnss<ȮRP}Nkv9wJ)t+"ϕFEK.m˗ K0-&NZ Zxq7ƕ%`[C} 7^հa+l…=4g>KgÎt><zI7خ-R&{}W;b69m\}S5P魝HBÎ3}ŊR)JJxt@'v]][Řz[ ^KI *a K:#^1`VZz錾|࿇ZنXp*`pFv=8GY#n"ݱ/f'O`#` JŶ#8U:!^ nl@gNU?fLZKJ)t`08)@ U Ic[__:JGě-2Η+/8ZB/n%醞r]w̟?#錸b<&/+pPryno3"t^ zؑDHc_{;8F_mltRJ)+u2.R}1n.gUT3{NS-p84붥e %Y by I'aƖ&m 't#z9Gol.DJ)kE l1GJgy2;b OJwfB4MzR3RJ])WttA6ױb5`> /D@`[>pnIIONEE! hayv6Y$S L={=AvoNH$A#ޖ,Y1k;?_`HN+T?xY+c爥SFt@~鎽\P:C72d=?YRt@ 0 |NO,X(Ȟ}C:>昣l8c9!Z[w?'zTr]|@T ø_:Ӥ#z"3- htC8 &KJ)DdyIe% `%6P.?r;r\lRtT\ {0qZ=I08!@'P(d?.]N.-_"ڪ֖8C'vX 鱓Mj ʎ8d __)5ƪ N:1fI7ztJo+d!3~^qJ)CJ`r>'gѦp8 |c#zlɋ3e7rDB1 ;1sΜSABln3tCR*9[2å}A]|룖?IG !oA{J)LJXe޲g#$&;:e_d`+"ϩ  :'sחD[#VH!n[Z,X'{[[8)QJ[ {q`7io)3S܂`(M^3RJЕR1SYYy(;t]00K7u+Йg)K[ti a`tÁ㈮>J:ж DŋIG馞t0}$)N% IDATPJ1طAjT6m0nP髾bBԣ R* ])3mmqL/K7؀w  d͍N"ȿ#l@ ]tsڀ b"H;;@T,VIHbMDxS:al9S:B)cQ0+E?o0yCާQbC q|Wv(+b?/]nu DO1N@` X:8'%%% OnXDv?n Uaab})DEL!b+6`% J)_~_a&tfk ]1ctJ_G5̘7yƴYP)ЕR1) {CH7裏^ {0V^>{tDO1ضXhA;8dȑ#  1=H!Z+ 4[:"9dMY!1Ʀ:dHGߐn4x෥;R_I'|Ȧ-{l턱 Ʒ#TzϙLLKw0S帍?PJ)s:@WJĖ-( zآsJm+Љlu6 a37c 쌖nH"۷[X cEauC>q;@}Ⱥ!.t}:h0$.oGr#s NkNH+*68u3PʺgKf/@WH6W1 dC_s*/SLTŇ-xBF|Ʋt":7trODsn_[;Q\=0#I =zə.0xgx{ GNnHQ)޽c:F[7KץB :Wk8ٺuM>#DOg2{Mh8@'3@^tUTx4V755#D# t"na >,x\pY`uC¸t_͉hu׬"7Oٺ .n!t&&*-U% *[l "dIsޕ+W>cv~ I@z˾}^a f.>@!ǗzC:Vg|y':QiiƺG'PIZ3gN@8e DDT@Ф#hڣْZ ɺۗ#{`:\}+S_@aB6,LDTR`EDI$9nj捨 tLnH?J/)  8ͺCBӺ(8hP#7iVwKTLn"%[7BXc@塵nHז "uvղ /e@D&In>T!=^U8:)T}9rcT89V tոFjDQoGXHTGlkiIW7[B߆n^۷[Qɤ' b @| :[UG d FD Szuw"PoLE#۬#oz.~ г[@U[7 PG 
806;j/"$H\*3rZUiu%ˆ/V 5"*Bu|0k|v+pLzUP3; X6ҙ OQa ~̲!,v~""ѐ(^NUn;vvD:k;9$طoǥ;{@T'zxU5j iٝw޹@uG4\ *&Z78pmy`^9:l>x?1T>;t$"@R[n^؜zsmu{*O+PE`61֭tȚ3GdeA j_7ЉhNpuGX9@?ICd: ]>eE3={R9qOT6ffj:[Ks56m!h:uB$r子?VJꇸ2WgGJ!|OîKS7eɹ7?ahqND&"{:1JnN>r^!}˥t:8vf#Tڢ裀Z]#%-\CDD: A4޺ G4R|TTϋ\EřLe{Y"FAUԧ8@'*=gqO=~%[NΊWZ[k]CDR[5 }\CSں`S&>K;+[7o) DDeV"J,U[~q@ˆuC> w 2Qi@TN.䢿u M"uꛕ!%kI\R[ :0rM(u.-,6]yH~gZ'Px: kdle%DEVuV uDښ_xƉ4gV#FmfZ7Q1=Dt6[L9뭾D;@$ D4h./X$#4* >ryNpA'TrNdnuHƖхDyzռښnVU5г;zh$nu0&% k Љh(y$Pu4@W??lWxֺl7rt" ɾ.!:]ӪnD7+Zj+A *]pE&쾎€쉢Gm6Yq% bnC?NN arצ$ *W̎'!v-C&xc$--骛VuQP@kVoP~b&puQ$:JSv^ :qMHGw|m"": n]pű:0)eAAm]ӃMT* Dlf!(Piտh:uu QkV5~0ݺP9:!_ Tp Dmv:ïODD8@'szuBٵkЩ %*b}-ȭӺܴ6ۺ`$ʵV_mBT2 紤kPxuiݐGGN*=obi k&M2t",GE$f6EdukCw{N(rN@%F\ ^ ښM>:(V/ҊPɿQM KXWQo"~ptס?ۧ}k_;$xuU?t m9@pQ9Kg2;Z[ Iol5= $0}J]kmC:+X'Pi Yw7DDet"1c:|~ke9h"#l6U@ DԷY nћZU6#reg;Pu %TZ )RuV6Zgנ"pq*RAN<#,h):wu[l"y n"x豣oXYZυ^gBfS?8>-S9 t" |g)Â4De6_׃M䓪p(6mAѺe΅uӧ̵!2ꍑnZu}#OS;hpND$|FuCRѯ  &QN'$ D?3?D!2-%DR[}5XJ  pʋ_ei7@QaQrD 8,;9X7$zqNiUxyPpLNpN uo}o@a0NZR}u pk^_hYTEey@~y_\ ahYOJ"*3q[QqND$DQ=ok!*Րu} .S!U}JO A*uP ?U/*%La,tR+7Щ,-S,uK-DDT8DѠѻFLUnSag_'OrNt2Z֤'&w\lSh :]_3 ~~n%ښzuU4nS}=KRF_TN;E`BDDf"JQ3=0bynQADQirl}mSdYЉJ#eG}-ZZ]S0}JB@u~Ij92Q^ qk1JNK7Ս "%Fɉ4d t*U|ĮȚ|Y )O'Kwy"*kv%=Gεn*$QY>]ӉLu-Dnʔr!XN 񭳶laC'V!3T7#TB|: t"H5mJlU{XLUdHG$R/+ǚpXG1<T%*w33[tq* K{ IUk̴u Q8_<EmGw[PٜIi Dпlr[h8@'A٬wE˿91ԥBrte o_+"r17c͛SiJjvu QԤQ[wTP!_Hg2{shPS;K:GD(A|JCaՈTQ$~ T ` Q65)aS@w\fBt"Ӫ Ǭ;NLBvMm<ӁDck7ںt",7㘯gh;Щ@?fl?bʬdf -J ӫ&[nY" pr_VR37}GoCCՕr7ɒJNHShSKDWfыm9 g:ǫv<STlq]x2,ʣ:y$Љt",7n~q BU Ѓ룫:iկS;z)lo#B|6*uϗyX6n%D4(\j~󳭯 t* GFuY9@'"rlm?̆T=?n.:ʗ[u16N7w'CNzt*+Trّ1'j "E5r3lEQ˛J8n j ѿ""͛lU|@u ]\[6*OUT))I>a@nS+T,*t&ypі-ˠW*[DD: ~7gڳml6>% t_6hk2Q6oFa?X Frk],bU{뎣^B;Sͦ-ru}n-b;fk#h8@'Ay]-"c`ݧTϛ}J4 t^!?Q "":Y?YغeP?mA奦CZwQ_ݖ-CDl{4OwKn}(V҉(pӣ&Yf!"e՝0κ! ShauOws_wϞ={~9=ź-[۴-m?OK?vz4 *z賴oNK37nO *>L{tv X1O0[DD : T+W&M$kիsp`zz!"~@7-T 'SepuڪyKv7YG ʉmپ\-t"O Y_XPPaM`!-f@DD7ssE!G[ښˬ# 1<]*zڵ|0l 7qh9*'.QJe8BW˜DDEm5rS puan-xkDDWeˆX5-'6j#s2Z+ȎG/Ftr`3?mh2.gX?\I:QrpND&jsoxn? 
n"<%HQ\!ݍ uK?yZ#tQ?ۃdz~:l)2Bl*IM@]m?? ""Љhbp3,RSa̟?=Rk;y;Q_~ߞ3sםd@n\iqݵ:l#!!7ѺJPMܱ`S \4hmUU#;8@'mCcT< bܸqnᬳβ~k;""*vfj_n9!KZkk^kA' Xn6mKC-4$eO37`kvAKl{+-t"//8: "q3@W)*no馦Wǐqu7m]-'Ѕ Tz$U =DW9r '['Et*+22+sЉt"4ӓ,Y>O@σ#(B=Ӻ5~H=TquV}M>:JG&=^g lߺCn̘<tҙC~H|֭/AB4+9: MG\.N@?IyJ,m۾.:Q @+[GT:4&8 M ˸}N=]i< m{`/RKt7T -t""\ODɔK#k]$ӺJ\`ݐt<Ğ^DD%*tV["** T"TnЭKR{#O_x߼y)J IOWyFypJ:: ZK Fn555Elѣ9@q4@z8:!.N "d;3-Q`ZkmLJL:}[w?_)r{ u>F2Z ͉GWL:]iADD{=ۣ;w ܭz:JB Ѝ@_tn>~oF"7{{d[zf@ɗӮinrQ|,&Y7P59mV .Nn9: 7jわ'cGCFp Hl8F7NP(v7MmB9t :Ϻ[Q? nݐ/Pg@uф[DDNqNDCf.T#7,n`pNDT.}xغ3Ϩ&$ cP/ׯgAduC> :׺/33m;WtDDrѐ(| jn;P$n~}fݐ$cƌnjɒ%#=˺Ǿ}'н.+\xIDD4.lr9}u%ז)SNVA}N,6@$p{O*Z'39: y&q?zB޻(Fs uGduAN) DDT3mP]a2K(Db)P ӨQ j24%7}PO/]eADDG&:8h싨 b@D}݇T.^~>Y7Qth38bJY 7[gO0E܉luQ_|)n: IH# ybZG8ip~ND D4$Wpދ;yݶP$IeZ7p. j\  *i""*_W;qkƺ$Hƺ :(sN2\`ua_u: y3/!F$t nH ~ gUtlqDDT\ptܴΰn pYh. 4r^WUuu3iݑOEND䄓7Dl]co<s 8{cpًCY=ۺ[ɺ`S6<Rrw(ɌXwO ZGoGd:D$_+кMJ|j"" KnFmmNHHޛlV=+WoCkzDn ""eC nOԯ_ar)tp@!zuщԯoߩZwSՏ7o^ʺ $@k4o~6U>@n]"GמNBjC EZwt\4:E@TwY7PbQ-t"";Qabu+*Y'nH *#m n~*DDe(~c@I.袺ߺB:鄷 ƏeAt"BhqS~"r:"?t`UW]'֏2@W @/a*TSiO9]`q, <o 㧃zҨ_*Qr=s1UQ\ 8Ӻ!T<Ǝ|uQFJǬ;*ЉpND1rd騰ʑ#Gα`s;\ĊOV'G G8:#ʕ+uG /fADDT['H]li#On}v$(gp,yuQ]+jqvzuQ9 nN;N~kݐdѼ ~^^FÛ@ths֮Z7lNݜh cݐT";xS ZgGe4@XwSOb"r:ѠFU  =DǜDTn ;z;(7Y7 "YfuC1 Һ\*'Yw$լKfc:Rﴎ t&WYwfZЉt"*fP@͖ʺ!А;H G˗ﰮKillgADD%gNAp3@W b#zHن/S/=CҺX uQi'|*ޅNDT\QRmJxpNcc#Z^mPZb&댄Ty5!o8Lgٲe[[EVDDT\AuC$)Y6=ma~1H*[cݐt#kMruQnoZ~ú#==DDt"*~l#Ժ҂OÇ{Jγn)2"usF__ruWk-n嶁e,네/w?GQkuQ:(" 8@'RGR-vT͖º<kOdP ?.z՝J",n( ^ֺ~b&PuB>ŋiDDTNX7 G'LiQLq3@bup8@7?{~iчڊ/~:?6ruם.R<1(ok]bK c͜9Px8=xݼ<.E[XtuC>U\n0M2@uG>77;L_ZtuщlkЉ /D4B Q+а [wtKydϟ[w `uGGW\눡Zr~~ݺ"7 A  ӑA"gDf?:aHb{ X7IKcfnjxR!+֨4^Fet;3J D4nmں0ŻZgS,N(3է~# mѢECGQom* IDAT D.j\gCԴV_d]Q,!X7t# aD$XwjLG:?XwH ץjCrgEi5nADT \}8MD+eA]CPP7+K>iQ[#jغ(]3QDD3r řm0TPjb9`nN뎣htuPIOpMߴ~nopF3j&Ybxѿ:qNDElٲG]|r%KZw HQ^΅8k-zu`566NQC!=Jj aG;E?cADDazd(ɼ?;bO#Jm'NdQ P<`q4uZNBS_kN$?7ʻx̺ 9w!D_~<8eZ۬#$$"r۽#0YOsvDeiccc_dɈ\omr`ʕ][>~foY\N ݵ:c$a2Sڵ:Xuˈu`LyB3)-t&өB qU$8p h8@'Yb~iqц &Lj;npvf\^]W<߄buG_Eo "mUU3P|yuB!tiɺ7 wUU=k 
"[7K75Oyu@=8uu@?o^l0huG?jԪ[kGwۺZj:(8@' 6E}cѢE[׸qg7n}iuŝ;.\|;z/ZG իWDSlâğ]Ƚ}/pMzB;=uE!ݼ Xwⴑ#,p߼}E_ш(|x떁8J0w.(I'=Ej܅:X ЉBQQݾ|u1 *Par"-z5MySpE˭;zZb6bXrֺT?O~1DD5Ϝp*oGFVʖڪOb>7?KOܱq zS< Tp.qO[GWkmgºctVh6}O_[w @e-隻Lru Q[AYUo "J*Љs/\!46iX_ .~u BwsꟶV2-WJ-uu%w 1]s|ٟǺe14϶NKkz~XaVaf1Pm˴4C\v:->]x'}=Ú|7fws64}uߛ<۶mK'DWI!"rnϧ?ְu<>s$K]"$@PZE[bmk]ְXmOs_sNmK2ҽ{jmEP$H*$?? !`HfrM&Cs_h2'Ƈβ͝;1 _q̯7Θ^iy.)E> &0?#i[p\U5Wj3f\R5cX$M`eeev!BKƕnYX:v; nm \X6Bv0q'eX--+=Wy2ž.Ȋ?-9z MUƖ|Kqe.-cY _h} [tLx12ޥ_lݼ%|vѢEz @? Q_$:K'O+N-ttIY_UUUV]]Usr*֮ #d9tUH|xζnQ7Nri$אW9LץcG?dӽ yaل Ö+JebeCȶ+V?#)o\eٸ_4yr-J_MlvG >998qߢɣ(X2r0X{~MUظM:G/f5G܊cKXV7=,{f鲲ELrB\Ȏ 2tO^vN[Bx5K,9jw)&E﮶IڴO ǜYtтs@@NctŤk׭sUUU%)O^H/)LSԎ9px\Rxt|[UUk??_fy|';IUҨdB8 eo>}~SDu:y34}j{ߒil\*5ktL<'fJ,W򅥣GhY|7yzG顳K_2)#7 ^/sCSrV$0yB8&w³>&d{ұ_2ԏgeV{1ǒ:GԲq?hؼblұ_:~*w}\{?CBe2}6*<4t\}:GLw(|6Jm@7|Y{}.UW_cVU"IUL;cc8qs1dܮ6zIwy s)7<=+t2 +5tЪlW%sS>xТԛLwVFɡ+_|{\9c*8BgyMM-2²? " ?l mKǕNT:ǫhXɍѣO".sM'6, /<~Yf2ɏ%KroYr'<x'+_ar:{aT46Ҳ9$I/Wl۪ ^lOIʋ8+'lm-^}nwi\:[eee;ׂ l8X*++;PpK]_6ⷎy$ k~UՌ?;w4YSm&GdOy{曩Tjs69sq'20nߵ#/kwq2鋡6SdH$oqG{={֯_?.n_f~(Ёn˰佼ѕ0*ɮΝKǕ^EQ-/+=xZ_cn+\:cRiW1\n_Pe/encYȝQessNolWdp}M҈0&[3lsٳi᪒+:g#UӦM;ݲG.q~Nߓ?~@^1Fst.~| /tڵ/\ IV{Õs&Nz@ܥ9 }mI)zԨQw^]]\n5QfI*)螋=e_\:GG>zF4H:*t,{ɤG=n?Eњ.3kf:!ɜ XRsu1&+ 6n8z޼y~%z~@E'%wnY/id .i=O Wyʧø98扩B] 9̝+행&Z*t\[ZV8^l׭*XFvxSYdX+Zq= |]M.K,qc%%#[Ig2Pf@]KǕd V4I~ѢEC#eJ*ΝifY}ɣN3cs)S1.p%=}SCg2LZUgY2K4&\SS'UUU 5qR4]֮2{hk:a(wLWIj5֮{>S5}~_]0)*mrO:D>;OUUͬYܳMqU5}eIR͊랻 3Ky)(I'3ܻ%ӉG}r9CKm&Y_T,"wH$SRƕX*UmrnflG,!QNkbf@(%)#7+Џמ+7.-+\ 1as%va\:v|=+l9,3f"ͯ+$חb7[FPF$-Б]ȿfͨ8wo!CD:wZmz6/:VlPIgwj~e!խKǕ.Talkf"֡[_fe~ ZZVb-QkܴEQHGJ$HVzX2!W~y}yI˖+.BgLo]@ƯXҲ?{ޭl˿ )X裏*vzxڲW2hhx%K{k$L5,Z&{_=SS A5<\7Jnv?;$0R59`v/c}?iw__sS5? ȑ|/t| 9p . 
}Cn{57#t|˃e%VuLW3j##ʽޥK.;͊r_c+}f1)s:&΀br=Bqϟ?D:=R[֍ sK[Jf?tc"}uɣV`)$ӱu+0P+TCqgC"w}7m`6tTŦs}>t⮻f_%ik,ؗ2cƌC&eycWCUT:GZ/_]H܌}n56n`q@7z ʗ~!J Py>b}#t\Þ;l`oNxue$fbUVVvNwV% ty+:ǀ9uu?joYwpf^Hj35g??t` rա3 (w~(/Г KJ\rd:G!p-Vov wY|1''.Be>j 9黽$1iӦjE%Y=5$3Ru:vw2zn{;cc `N]ݝ&mg:>|[`q >r*olM qzO J&WEcsJџM?xdq%U IDAT\vbO2(|w߽Kqmr1$Lvƍ/14DoC9sno ֙_$eBgࢢ6f}lt9 OV4:FٹeW5$(}t {FVO5)9D3L^ѲRdz.t3dP KӤB芙,C!f-E פ=gnN5͚Tyެ,Q @(T*9]x5ݸϛ73^s~:H!_/D/ H"fS9k+ }vfhkțq[ږtMۻǟ$ξ fw1aA9{k\ȍ˷&tم cG>ף$>`d)[7n[9AfvxrD]OtϠQ'py(sg?b)e7ϩcISWsQyzG@f@wN0ĒfU45er }!K5L]ΤFz͇܎`ƯXLhȥBg)n0+C,7o9Q?0NKڴOgyC\vx\@[g:K0Վu mLn,ezhn-skE従J< Ϝ')tȤs^Hl̘1B(p$wESK] }RRC!Ċf^-?sSy}Н%aOo:jGn>7tB`1_{st.\6neaڴi7D٨8źI獜Ty[wYGu ]{SyĊF_;wɄ],㡳s.jgUWWǡà7K> /vCd7d IR'ϗYRѤ!!,ߞ[/{B/J[[,]#Y\yWKRʖ[9[* :K3֬5F?,iW<93 {2.a:8r NC6mZzĩ-\/i3 :=3lgZDߩY?2_,.;A;Tj<~Cks:@4{-ڷ3Xݢ,7|{,L[fJ~(UjS&]Kj }Mhl$VQs>: tp~N/piu,3 ĕO9\!dveOvue$S,X0˖=|=Zrk_(lٳgo҆Zk/Y(ȎT*tmΒ'^2EypvgB(\@rlAI @.U6>t UySB=:-',?gKI^rd? %^zye& 8~RԺdH&(Ycƌ9%wuMN;=RTTd|X=q:ajl@5o޼̜[Q,qǤGBAvͭ]`k$uX)>Bns/JF%qO:cS[[فOl"喲i+鲙dkLz)t&pf򦖅.u5-W' OpAQRڹBIOΒb}e o;w 7k뽒gmy-`~KMr#t'L>4t Е\cJ߮RRmqtNŊBG)$H))%L{OljDwuIIX[KRF 9]Jt~I8Gҏn-@,YQu.o]loػ_;=p9rH*)SVn\@AHnoȤ+y,yf)yRT*U;CP{_9uo: dΑh^<|qI'$*Qf%5x"=s:K}W;/p3rMM-)"@ONCekW4#TRS<6!t^*O'LzݒsJ._޺Nuͥ.~lI/%όㆧ.J^Jgq_ZF!/&w}fX&ThC4nbSܢTؿMzQIZ:KiHO9Ih. 
^&}Lkfbg5#rչMKfZcGoM>9'M:ϱ},ޱhѢ"{$X/**> @@Ppk~'I#;CAKj.L!tqIK9sj×JVR":/Wܿ&ϔRoKj:0]'eW6ϵr}^Й%v5qeJ[[ }VtޘظamYag^Wtɯz:Or_q&<&YU____{š T*C^+S FT}GSԎa;TSu \+jmCS|^J}&J=:*_|{ʖ[ӻ2aL~zEcg&<Ё6=ծy5K.Iq=`be˭Q<]·\*V65k `cm_jgbAnX2vw};t,w%i7JZ#ŏ-WwqG1HS  Z}mڸR% ''\k]vӦΞSWw(mݶ3UW;_)i}zQ2zbc=ɴeꕯM,[\lڶ_.yNf撆G64}z]KW4E5;۽?8@]o֞Λe3 +5tЪlT:$EꖄΑ e'6ٵPoogMmmT8J1}.yMd˒>]]"=m=Q}uUͺ%e2r=NL/V4djљgY4(&6dJ:S/+g+Z2),-+ܮCrʖ %xڞTݙ^W|{kCG6ٍrN3h%F9k"2%0H8,{qRw(f~NF?:@P^ӧO\*ޤK/6}n]_BA5k֩%_fi 0uh> n8ݭ:ܺo cdo9B13ȇ+/fhc~C,դ +wz֝3,pqGt>"4tW^tUacۏ˛.-%%#EoWI' r=?yy,)+}6!tIR"KŊ?2ZXK[f7pL'>WYY9 0Pn6cƌ 2nכIg$Qʌ f͚ub: هmmI?J;6Ȁ~:Z 7MV2쓴IV` RP\\8+*8ƲOlWѥ~Αiz7kekև˃|iG{znzK |)#*ƮE^DeKKч%I!^L;Gޮ{K[[},{fiu]OXejk/(ZdKʗ/g 2wn{>*SeNE$趮|@.xM7\^_4%|=HoOxL@)a9sR"֛\fUnȌ'$=l?FvS4k֬qDZTf%JV*ipjIc5dS'J$- cƌ2(ƛc)JhFlWtliKdڕ ;2ӕ~ y)t:2hG,xR7$I{Q^ؕY7 9Q&1-+YdgKn"K͟VnMQQfQ/0:'Fvd2J%w;ƤA.WxlE8Xdt^g%,X0b aft6Yfu]ק KfdNDш>㴙m(**ҽ@/k->Np8D3^vdv&6u @KkdQCC%DGę(hߏ[R/l8u|0*#(:OP(:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IR2t'$UH$i1t?åmX& C4tCg@A$i?OIZ%I#f!d0BP}Jzi]Kz\/$Z>C glIHz>4_RY~,9B _t(p@vJUgS(HݏyIT8 7$lfCl@>UO$8 bIIuBc.f@klm z#IߖY<-f2t- t=E~ Y%A7(6w7IwI`,AEfD SQjQw߻Aస;%͗tuϐt(w z @O. b}~ p8> s@D{Q p8UPI͒Ny3+NJݣiϿrDfJXDd2zuTefs}R tol#QG&;M9d(P"}f֑@.Q8$wOJjޞ+i׊ ':݇l r=+ yG&]td2ޑDoI/Px5 :23#vvQWld\@pP>R9G:S@?ՑƩޕ@Q8K*]mb$vD^MBP.͓ۘy#Qܻ'Y:COݕlfѻIS r@=)>#s($8V&,E@p0gg?\yttr@P8{]'*OR  tstOHy)= }'ҟ@a_wB`_w3f+ :D(D$ t$Q IH@@:(D$ t$Q IH@@:(D$ t$Q IH@@:( IDATD$ t$Q IH@@ ;Cڕc'Vm/#_qGɥg}_hh^nuI9L^ c &8Q-;}qoܬ[m;j"31\'=B'3ܝF{>qsj;xP|}}~UMXۭc/?Zopre׊ݿa?׽Y'<2Kڸuֽֿ6lުM۶kڼm:2vRL##Шc}S#&پ-#-cti[/U')4QޝFuyc F7%Q%kdYv9v*,$IuTMzfϙ=̙3KW%&Tũ$vlٱ$[]; cA#K$.;'_>@܋h4u9 (5o܇S7C8IEw ){&gPpn1$!ho_nœ0yKDrzƼUs5IEfpsp _Æִ6ux>OMu8L$~Elwc[^Wsp'{@e~EA>wݍm9hmis:0 gK$`c7Xp$ _hn{~@ZH;8ߚ~&v21O{ӎҀ:~/&"""ƧDD:EQpg\@~1 Ë޾pݺמ܃yҗH&UIFQ1Ʊ7Q`w'hkL*8{t 3 u^I{.EIΎf|Uϗɤ7WmϞ- )ެѯxd48T7}Sk'@/bN};4{\*FޱL^<#yWl??ysx/щ4ո^?7T4 `oW|ڈ_f)Zb47h9(ʚQl {GW.AD:Q*.*"8u J67sRщs;*>~ܡ57֮~~2|܃Wx ]M+6 /w.\GRmm{Nc1y#)<;빉M)~? tCEDzPv+'=׾ơH:m9VsWqn?>4]',S!"ῼ&. 
yjҐR.Dbq'<PϨID&RoVO$wO,y^cDtg߽u2e co 7 r~baZPY^&~t*c~C8X?kw)onR߸:hu,ۅh1NDD9{ʳ]Wql2ܔH48Uw.cwB2YdQ6 әR܉Dc_m =6 {F<RVU? jllOM5NZO$K ˆ} cI:ިףUZC5&k T*`Zia6O&_Dpn,"X hNIDI:Z>q&$̦̃h<׏nſ{b7*3jj5PƆz7ݍ. ׾3W1*pmo(7𹃻qG'# {gxn;FnGh/ؖ5:hC4T5 l1H]^n2!#OQ‡C62uV<Ͽ:%{&^ܻCJKi""88u)cljbwgkVs*]rTz3RU*> W7Fqot2sJ[m מr"=-y2fgj'~كY%f1ʁJ+oH̞KЌoxz.y#-փ$D`:䶌L/]QNTVlhicccm^Y/Zkk~e^+x'"""Z@'",,~СOY3H1ef|'a6:oɄ?(娀utE)QuZr13`L-ExY>VQ$ljkrY^=oZe$ ~rg[;֎V[W:f)u!*enh<hVh2lP?4hLZr#D:lhpτ#QOM",t Dnf>h-a)~|$Id7_| ux"\;Gf-ݎ*O5?k}#99?QvlhLSx*EEW0$^|`kof egoJ%O\vgοZޟ~3 K9VT`gb$$Ф?Q-jqAQ􎦮@봨UZ4TYRDŽ#QDDDDkDD$en)_.uN>s@=DtSxdgny{bTq@+cxlmm6KK$ HT#mnÿ/5sxU9b6[9}h,+F&(@A v5G fBIuV V G%kONqԌ umSEDDD:Iy şٳ ]Mu*EDrϠTϓa13–zcNQ%L=mr=Nb؛Xze&olS1ҒL*8!1Kܨ-ų 6KH4=CTkīǃ!M"U8Sgz2`<Zk4TC-cbۗqGv,X:-l:wtRhJDD$;=w[z7oߤRDTj>!֨PYOZk`4m Q>i5S{ߕA LW0nwIصemet:&wrNJfxt8ThN8&S'v648N}2B4ZvBRh"=t ip{!moo;lv1x"""R: {Mz_8GzF/M &+q`f*F]sڅ'I\gKC*.*+nlrgx\RJ̕ouI٘ppkzڞMRFAxgT$`lJdRI"ځ4%F2F$ڷzs~~5% 6=8jZkA8%f-G)v "" t""F߰1GvvYiU)"*5UآN yS<$. z :35#@OYO¸;"& =Vn2En̵>nZ&6z٘ tzUH:dcnQ2ϛ$7$TH4ZoZ7Vۅ*'H$YDGium͉w!"""Z@'""!I/,7d TZIWzU2Qj}ߘmܩ蔛Mؿy17Fn=ZĄZL*FRZ I%J7ͧmk;~nE&;B/uvWZ.[eL >-3AsA,&##Ƨ!G  t*DDDDkDDV,媇ڱ&A yRImmykאּ%=(e{I*>Ol;09TXw ٶڅK$?x俉ޣ>[ 5إ;&Љ(=Rs[ck :0qC[j}($Tvv4K32E&t頗T]\R1qwtRrb4+먗;=yDiN+Hi BQR׌{6 >}d#^U4\,D۫7ոoGPt4!Od7KkQ1NDD)nAnG'3`hrJj}G%h z(QhHhMWMxM;aseV+!%0 55UKAițҠYeh5*ӿj*#~7لj؊Ihzǯh4h,ㆤ\Ż@ t"""Zߘ@'"dgWookR)*Uir>y|u'`$rIDUKo<)~X\‚2LI'FkSMTVjӽVKPWWUB@7==j"8|ө/kr;KKv4O;{j\-3E+I\Xr^IZ:JpgX|v5t5թ aDsCuafArJeFjX`Zj5=jcScnx`4T_S׆y&*vMKrrI\C]D MgD |+ԦL&oJQe&8cprJ 75;'LHJDDfnB'5jT"S16,1"jM$bz4VSKl^2uwܽSc ,fα'"""ZO@'"U䪨Z%'*VR$٬ zIEY+R\NIңvmA^H&}E*NIfWU "!*N="3=OoW `haI⾳Ж"Q9+Sokk]93$bC㉈hcV5"AVEA %I 8kOHWZLΫ Qd7tH IDATV $g鶪N6^਴HuFYDTT >fѠ9 :GeC +Dc ~Kl_h{cMC> pz\7(﷬QhJDD"X $u3J8$$L[_0NE^!U( sKa/c\KDc]ޡj`NEմ8+UC[J=B,H&+ \nK,FŻ4E4=WpxWL~?n80/{](pg1H^¼6b\Ű/}z^ z.^t\F+K|+ԥ":~@,MZY"=-U= }X0NDD+ IyJ hӧ)$+ >]TFc" O!X̙]cwOjfh 5THV/FxmE}, !o&?V+ThNRDxDhX􍋵Qo^SbF6NЧ)BjoKQ!"V"VgDO$k#J\jTF#4 E|y4z.٭ o̸%a Nړ{>Ƽ䆀 6ZY x6 zX,F"\ 
3)4Tۑj!?89dZ7{J+36)Vsa4xyV[#]"O>Cǔ)5 %aHLъB KR 0i6^.H G*NZF^*qEjz*>?vlhƫw"-6 pm!(h59v -u0mt3]6s KuZԻR]HfSo\N6ՈU76H_;s_z] /:#{]fVbdO$T?ϫw㉭'""Z@'"-IJd{SZqd^[$^'7&d‘N#~&SCa|'$:\Bߘ}xp5Lg86WƐ(kÈ/dR{E&pVXT=W%@iڷ@{]ll>0\w9bcqкjge%'7'Dgo 7k4|}'"""ZDD"JD>Р td DQ>ur8cO5?/4-omM'wK@.%ymP׆_9^ܻGvv~ʟf㱀tuZmMm?x"!%[<⿧b;*V,Ӿ}膆x"!6HnX"Q\;Ա&_=zT1NDD+>7XR.``(tB]#HNwNc@J c;/_rUcqkC)Vn Z eSR>u?9VA@x"b?bsKa5ksyZE7[ XxE‚,?wLlye&cF\M!F ,E/s c򜈈HъbqZ\#} IɤI*bU"HT'n|?-Y֭1n!\H i@R2b'*9# tŵ +#SU!uM*B7Җ%:>H ϙO,7};מGK 쉈hEIE.Or'}^'@/:&IɎ0Xo2“Zkj[HZv腿)*D CinĜ6K*igU%#"q'eK{H#;W c&ЩM jP( ~q*˸GpnqGa2!bKvᵁ֫jZPGh ޭ=ո?rMRQ0&X<hV輢D0HZ=Gx6 zDڳjm0uH* t[^_8yJADDDdDD"@'I J*UZVrPqR ģIUf2 O&Y,,e:y忽?y9*|iJ/.;]kSFL<SbӎQhLL7]i0N@Ny0sمc,Q6k9ϣhX@@QX&nrn?.dēqxu@BADDD%l9dQD }xqj'8ֆhT=F?3z{E9%</*$lbգ& Uf4T85.`,BG;!͒sE[A?ov@[BU}Br1SJ'I yp$co^|nt6:$""": {)S)*UFbh%2K͒z&#~t5\jmI?~|<|t f`ܠ'Y)FɟXE;_|,vL@Mm2 z6QzdR"D߄_hmknA 'safa }C<6ݵ0ԗVdXUJdZ/VCL2Ie)KvP?L-p۫ON!0;/}t6U2c1}nŵA b.DT#V c 47u8li9D1RtL@TVOs[`O֧,ZGvvݏp$p,p) O?CpnAo_h/ߙ֦-GDDJM_v͠IK&UXdJ6heԼddYr GˎoyeR.KT^&>[ drm*ZjU(k\ +Eڸ+41OOkDs ?7uڲ>_BhT@8MFجsq`s>_ylHa%'a"%"""Z;@'"UIg:\2!7Al.I&L8RqLY>)LF|ŧIإH.P!XMrTDl tZ4?[D`fҴo *ЁE>p{j[eI}.?qBɤozh^~_=?e乌lNDD+@[de)3cz~Qxb5J#JMv#H)= %03'.9?Ϡ@RQpDس{ I-5Ɏ-* εx\7> TgWlL(5gc`Ex6[r0'<_$&S_@۽99o$F L6']+vvVyBhSDcqUsU41NDD+LC*EB6b,H&INEK۬LF=JN-K* N\} SjX\eawZ%OC&?ov@ͬҊ2s#cL/Cީ]twdwGįE3g~P~IտX"!t̍ LZB<-܉hEr|f栨 .G\RЭze5VRȵt0^2:-!.?Ws+-R-H*Ð@wVZU44+ ?Z\&vpfT&[2l3_}B֨?.砋hq;yu!"""*mLъjrm#8B *ECQ!DHW6JQ>$I%gZf hGKU^& S1*1N =(X GU(x"HDIz-ZSn2Q%v8YML<ήj:}]06r|b&8dC+O' 'h=CjF߸Ox3z:Yi^'wN;}C ofpՑIE&^*\'^JH&k44llXޮvcn Bn KuqqcF,7\9>[}8p>!͂IƱ*IEh\Jcdi8}b * K?YDDDDk DD"Vj\JPrJ8nFpvؼ@~T"43cVmęͥ][aOCKTSE;L$ZAShk>ȣ-҇}WlMŶ.4Rbr<sϧIB:V[W{ >.$mk7j[%Z4OG-dV3$TQ( : }ڠWn3S@ [*Nf<"^Q SWuG/TU+WǮ D SR4mfQhgCk̗ޱҩOņbvSxb$!G""" t""ZU&~ğ~V!/Nj#F8baڸ{Wi!:g͌KT@4W)KA3aUT5I/rrI4IU|Bsم;d$"fN'7׮fV#չ" jJgDJ)%Г>AO&. 
wG&T  rM*-H} .&Vh}a_[[Uhe1@Ss7ՠl{TQdALL!0;/ŭ:T|j`2{] QzhM5;6d;hiwڅ0 |&go^K$x+@d4db^#QO=:}%6KrDgHyb[ TuZlkkYwG&Tt$%P0NR]c$E*(YAm#U|̙ܙ?"^ d;m*.z$𣳆djXdzn He%I%6TYPQfV1"v%36zlT2;0t}&}wfkq vlA2.?#w$@O#hb""""-܉(VVJnJiAu/텣8|mzXŗ$M.Ey@%+-*EZQH(ek  TOO$sѧ5 GC-UBxdէثPfJioQ? 6^|>IooWsM{[x48=Z(ZKDDDT@'"$TY)E. {RlX/r p$ tvZX°_&>] g!$ 3_r,eDtMpv>KVXt33wt-7qTRgUh9it:4Ilb7@'"7IWxqOX4il^.x٦Z"N,5]}r.rWY+j龳@l 6kT0/v"iZ3976LDkkY >D{D2; t@, &gb6!.0|ANLmu9_͆: V}]9 QiaRrVZ ْJ旊E,Fݑ "Y|h4(@exyznAxesm2Y{8N:f_S~ȌH0# WD`K*FiD=um2:*FDTDIihDbu:dLՔ@=m;^^emo8=CEDDDT@'"7I'8wOh&ɣfhvu{qJxmD$FOlj/dQ>AhN] ]_mQۍ@X<مxD6xJ}{f!^hLc!̍۩;mק%ס1NDDihoV#X|ApVZ(`jfcS_]Mu*F=]<#33%WRl- $']AU*F"gfzTQFw5PEݯN׾%l˖cyX=qglRh]Ɉ:/Y&#삿ODD$f-6*p$w.%thm{L2QrnJX32.U hVqmc6-ꟊu&ųAE`V U-e{?{6BWkt :Vϰ&Orj6z.'qM&ܖ矜NbD Nb<Ұ7?ݩ'%Zd QZ,fTh, r7I8+pHTt/ET(SYb6ڑT|}&N'"""Z@'""aGb(J IDAT"}L`vou[T޾Ij{o^nE[T%6{֟ݫR$N'軚Y?ܜʃݝYUF0F]4g*W$DZꍈ; oP||[m vvt JI=z$è?X<ꚖZuڷiѠO&0\jiWKsɈj*l9+,[%栳 =%ɤ{ """ t""Aw[q g4ֈτ_[I.?} hǹcC}#>~ =Uc3w.\:f)ѝ8E1=v7Qd7WOϬX"w$Q:" tf䋚iF9B煫%_han,\x\UִD489DDDDL>$kw$BS^v1\cf>z$ڷKʅzAF:6C^GDDDDkDD$YeS;*Ŗ5q"UlkoBD29ϩƩKR5yBjcʶFljDE'M[xo^*EA}h@K|S8+Y˅rzz H4DDD1NDD4 xdM?}\b*-.;vn; ~y2"qfϮ\Cf- -RDŽ#QD yE䵻Rٹ:mզ( |M\/}|ˎ +$=c^M(%⿾zFն <4*_سM;śWʍ۷N" c2[vet>&r x2^Yʷ&!,F"Bk[NhzJ276DDDN/R""**vk9/OJhoz6pUU/dqt58mֈf+ XWpƽ6Z|^Rڍ1 6O.v/H&qv~~ bqQyE?Qɶ2;"<.2:֨׫f>Wzj3< uո3(O%z䨴b6a!,qb`‹7Q*')DDT66z#@ɰk)ʼnqmxUA[n{%xg09=14&|9%@[ sYɈݏRsKa;ƉC6r"n=WPԻxǤ+& ݍ_"u\4.[=8[ZVM y8wW{L}h4|l* ,~yƇ*͗,N!$.($З]TQ_mGˎ:G&D?=A~zhI:c-72)ݦ‡_bgG3lCK usKa~r cdžf!}zg-3n`z^>9h\ etlCdO m/fvzsKaTUti48q{xv{ABEfX0&3'2K Vɰw ջ[-qgxw'LF XV̮-%C1a< 7UQ\7avTj6`#bn)LƿZ _8'ch81+@w;;dJf>$& M=?|Ye]\JV71QID}1dpNQ#(ڛfL+@'""ZC@'"k?|lD2%銢o܇ƜA: ՓIED GSS1‘ bj},,af!KmxvwwN_Xg3n.JN}E>X2^ܷGvt`4nFժN#@b[DjqjqjN; :]FlS/SAOUDh߸ 66q l([[^w[#E2Ov2ޫwad4=\sWRn6k:3z ϱԪmWyNW.2|`1Qa^y5PݙU2汲[Ӣ%S;*3mu(z9=jTG'LQ4?M_%!F{`A WBk<(TCӆotQ$:ƋOZphF|R"okI`vB~rOl];klWEpF<*+G<'ʁ:zc|WvxΪڨJXi?_Afzh.VZ #),#*FDDDDT@'"[—+C9TZZ ~xqߎWжg|i hv4 
^ܻl%^#;r1nF(ފ\>ÜUVE{]8{6^{v+D`AW᬴ʚ2,-{X<)Fՙτϕj{T~VQ^Ta:O|rIDDъFݏ/R8ovdG:V\&ڊC6BYmm2|뷎q=$ɼn-+wckK}^ηV9*,x|SljG{kq{xcXބwo^Sلo4|܋7]Smv2'7zƉ_Px}s/k \"ۇ}MngFrME՘JFnq7GUhDD^Cݝьӷzp fT?VAJZC~yՃW>bn=:-vw=R-kAwboW;:{ơ(**3qpk'Ū 9+,lEw[#:Ь·^~S87G_Tf;ښJRhp`K4toq$(р6]0yEf o/DeZ*KT+R@U ϗmO_mhqa'։؍> ""ՕMxvV\7 j4ۻظ _؉IU6lBBwX@aTXjUM% HEYZJ q}Y8@kw;;O_xf?N;S>ɜcg27sw#YQu=S$X?B.\[[Yswߟ|<ӧ216:DGå]oAݑΡ١94c?d7._GG&͵[~O9nJ~_)c #:pgZV׳0S?sܷwrogvɩ?6wojVVײƺt휞Hg8 !8QwѮ|F̟馣dj|59X0:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:$Q@:ͦ0dJ3z_1E~z`H\]O QL DT0( t`7;RE5/+(Ё#I햷0(.RSczGpJIWAp{~v-w!#v)Џ@:S V)kJWb:p(ЁN(w#`tSwR~d$RʟZgH84h?/# )Ёw|%ɘPtq$/!)Ёw|:R2{?PJ pPYJ)؏F;HHڭ%A?&8Z,]UU$Iל[v5-n[tZys:#)ЁTU$_ٮDpJ)<5n?PP=j6ɫIN7CRʳMx@zRJMPx%@J)/&y I>[JYo:r;PH$i: CJRKM;ЁRJYI$o6 9p)ЁVJI4l){MCK)$LFYhT[gP;ЁZ:I>tF/R~t~r%'|7;2xx=ǕQ@j+lRI.6N$+t~r;WUUM&v$h8$_+ @$_OtPϯ|rAI TUUI>I>dD$?IB)M8 tTU5O%y"v $y_Rl$@UUչ$Iftp,ytyIDATxɀK˾ IENDB`glance-12.0.0/glance/tests/var/ca.crt0000664000567000056710000000240512701407047020447 0ustar jenkinsjenkins00000000000000-----BEGIN CERTIFICATE----- MIIDiTCCAnGgAwIBAgIJAMj+Lfpqc9lLMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMRIwEAYDVQQKDAlPcGVuU3RhY2sx DzANBgNVBAsMBkdsYW5jZTESMBAGA1UEAwwJR2xhbmNlIENBMB4XDTE1MDEzMTA1 MzAyNloXDTI1MDEyODA1MzAyNlowWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNv bWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xhbmNlMRIw EAYDVQQDDAlHbGFuY2UgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB AQDcW4cRtw96/ZYsx3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf 4wyRU1ROb/N5L4KdQiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzV ta6HP9UPeyfXrS+jgjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB 0jXm1nSG7FZUbojUCYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquX OzebskY5EL/okQGPofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+ 7BA2ZKfzT5t8G/8lSjKa/r2pAgMBAAGjUDBOMB0GA1UdDgQWBBT3M/WuigtS7JYZ QD0XJEDD8JSZrTAfBgNVHSMEGDAWgBT3M/WuigtS7JYZQD0XJEDD8JSZrTAMBgNV HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWOhC9kBZAJalQhAeNGIiiJ2bV 
HpvzSCEXSEAdh3A0XDK1KxoMHy1LhNGYrMmN2a+2O3SoX0FLB4p9zOifq4ACwaMD CjQeB/whsfPt5s0gV3mGMCR+V2b8r5H/30KRbIzQGXmy+/r6Wfe012jcVVXsQawW Omd4d+Bduf5iiL1OCKEMepqjQLu7Yg41ucRpUewBA+A9hoKp7jpwSnzSALX7FWEQ TBJtJ9jEnZl36S81eZJvOXSzeptHyomSAt8eGFCVuPB0dZCXuBNLu4Gsn+dIhfyj NwK4noYZXMndPwGy92KDhjxVnHzd9HwImgr6atmWhPPz5hm50BrA7sv06Nto -----END CERTIFICATE----- glance-12.0.0/glance/tests/var/certificate.crt0000664000567000056710000001261612701407047022353 0ustar jenkinsjenkins00000000000000# > openssl x509 -in glance/tests/var/certificate.crt -noout -text # Certificate: # Data: # Version: 1 (0x0) # Serial Number: 1 (0x1) # Signature Algorithm: sha1WithRSAEncryption # Issuer: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=Glance CA # Validity # Not Before: Feb 2 20:22:13 2015 GMT # Not After : Jan 31 20:22:13 2024 GMT # Subject: C=AU, ST=Some-State, O=OpenStack, OU=Glance, CN=127.0.0.1 # Subject Public Key Info: # Public Key Algorithm: rsaEncryption # RSA Public Key: (4096 bit) # Modulus (4096 bit): # 00:9f:44:13:51:de:e9:5a:f7:ac:33:2a:1a:4c:91: # a1:73:bc:f3:a6:d3:e6:59:ae:e8:e2:34:68:3e:f4: # 40:c1:a1:1a:65:9a:a3:67:e9:2c:b9:79:9c:00:b1: # 7c:c1:e6:9e:de:47:bf:f1:cb:f2:73:d4:c3:62:fe: # 82:90:6f:b4:75:ca:7e:56:8f:99:3d:06:51:3c:40: # f4:ff:74:97:4f:0d:d2:e6:66:76:8d:97:bf:89:ce: # fe:b2:d7:89:71:f2:a0:d9:f5:26:7c:1a:7a:bf:2b: # 8f:72:80:e7:1f:4d:4a:40:a3:b9:9e:33:f6:55:e0: # 40:2b:1e:49:e4:8c:71:9d:11:32:cf:21:41:e1:13: # 28:c6:d6:f6:e0:b3:26:10:6d:5b:63:1d:c3:ee:d0: # c4:66:63:38:89:6b:8f:2a:c2:bd:4f:e4:bc:03:8f: # a2:f2:5c:1d:73:11:9c:7b:93:3d:d6:a3:d1:2d:cd: # 64:23:24:bc:65:3c:71:20:28:60:a0:ea:fe:77:0e: # 1d:95:36:76:ad:e7:2f:1c:27:62:55:e3:9d:11:c1: # fb:43:3e:e5:21:ac:fd:0e:7e:3d:c9:44:d2:bd:6f: # 89:7e:0f:cb:88:54:57:fd:8d:21:c8:34:e1:47:01: # 28:0f:45:a1:7e:60:1a:9c:4c:0c:b8:c1:37:2d:46: # ab:18:9e:ca:49:d3:77:b7:92:3a:d2:7f:ca:d5:02: # f1:75:81:66:39:51:aa:bc:d7:f0:91:23:69:e8:71: # ae:44:76:5e:87:54:eb:72:fc:ac:fd:60:22:e0:6a: # 
e4:ad:37:b7:f6:e5:24:b4:95:2c:26:0e:75:a0:e9: # ed:57:be:37:42:64:1f:02:49:0c:bd:5d:74:6d:e6: # f2:da:5c:54:82:fa:fc:ff:3a:e4:1a:7a:a9:3c:3d: # ee:b5:df:09:0c:69:c3:51:92:67:80:71:9b:10:8b: # 20:ff:a2:5e:c5:f2:86:a0:06:65:1c:42:f9:91:24: # 54:29:ed:7e:ec:db:4c:7b:54:ee:b1:25:1b:38:53: # ae:01:b6:c5:93:1e:a3:4d:1b:e8:73:47:50:57:e8: # ec:a0:80:53:b1:34:74:37:9a:c1:8c:14:64:2e:16: # dd:a1:2e:d3:45:3e:2c:46:62:20:2a:93:7a:92:4c: # b2:cc:64:47:ad:63:32:0b:68:0c:24:98:20:83:08: # 35:74:a7:68:7a:ef:d6:84:07:d1:5e:d7:c0:6c:3f: # a7:4a:78:62:a8:70:75:37:fb:ce:1f:09:1e:7c:11: # 35:cc:b3:5a:a3:cc:3f:35:c9:ee:24:6f:63:f8:54: # 6f:7c:5b:b4:76:3d:f2:81:6d:ad:64:66:10:d0:c4: # 0b:2c:2f # Exponent: 65537 (0x10001) # Signature Algorithm: sha1WithRSAEncryption # 5f:e8:a8:93:20:6c:0f:12:90:a6:e2:64:21:ed:63:0e:8c:e0: # 0f:d5:04:13:4d:2a:e9:a5:91:b7:e4:51:94:bd:0a:70:4b:94: # c7:1c:94:ed:d7:64:95:07:6b:a1:4a:bc:0b:53:b5:1a:7e:f1: # 9c:12:59:24:5f:36:72:34:ca:33:ee:28:46:fd:21:e6:52:19: # 0c:3d:94:6b:bd:cb:76:a1:45:7f:30:7b:71:f1:84:b6:3c:e0: # ac:af:13:81:9c:0e:6e:3c:9b:89:19:95:de:8e:9c:ef:70:ac: # 07:ae:74:42:47:35:50:88:36:ec:32:1a:55:24:08:f2:44:57: # 67:fe:0a:bb:6b:a7:bd:bc:af:bf:2a:e4:dd:53:84:6b:de:1d: # 2a:28:21:38:06:7a:5b:d8:83:15:65:31:6d:61:67:00:9e:1a: # 61:85:15:a2:4c:9a:eb:6d:59:8e:34:ac:2c:d5:24:4e:00:ff: # 30:4d:a3:d5:80:63:17:52:65:ac:7f:f4:0a:8e:56:a4:97:51: # 39:81:ae:e8:cb:52:09:b3:47:b4:fd:1b:e2:04:f9:f2:76:e3: # 63:ef:90:aa:54:98:96:05:05:a9:91:76:18:ed:5d:9e:6e:88: # 50:9a:f7:2c:ce:5e:54:ba:15:ec:62:ff:5d:be:af:35:03:b1: # 3f:32:3e:0e -----BEGIN CERTIFICATE----- MIIEKjCCAxICAQEwDQYJKoZIhvcNAQEFBQAwWzELMAkGA1UEBhMCQVUxEzARBgNV BAgMClNvbWUtU3RhdGUxEjAQBgNVBAoMCU9wZW5TdGFjazEPMA0GA1UECwwGR2xh bmNlMRIwEAYDVQQDDAlHbGFuY2UgQ0EwHhcNMTUwMjAyMjAyMjEzWhcNMjQwMTMx MjAyMjEzWjBbMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTESMBAG A1UEChMJT3BlblN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEjAQBgNVBAMTCTEyNy4w LjAuMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ9EE1He6Vr3rDMq 
GkyRoXO886bT5lmu6OI0aD70QMGhGmWao2fpLLl5nACxfMHmnt5Hv/HL8nPUw2L+ gpBvtHXKflaPmT0GUTxA9P90l08N0uZmdo2Xv4nO/rLXiXHyoNn1Jnwaer8rj3KA 5x9NSkCjuZ4z9lXgQCseSeSMcZ0RMs8hQeETKMbW9uCzJhBtW2Mdw+7QxGZjOIlr jyrCvU/kvAOPovJcHXMRnHuTPdaj0S3NZCMkvGU8cSAoYKDq/ncOHZU2dq3nLxwn YlXjnRHB+0M+5SGs/Q5+PclE0r1viX4Py4hUV/2NIcg04UcBKA9FoX5gGpxMDLjB Ny1GqxieyknTd7eSOtJ/ytUC8XWBZjlRqrzX8JEjaehxrkR2XodU63L8rP1gIuBq 5K03t/blJLSVLCYOdaDp7Ve+N0JkHwJJDL1ddG3m8tpcVIL6/P865Bp6qTw97rXf CQxpw1GSZ4BxmxCLIP+iXsXyhqAGZRxC+ZEkVCntfuzbTHtU7rElGzhTrgG2xZMe o00b6HNHUFfo7KCAU7E0dDeawYwUZC4W3aEu00U+LEZiICqTepJMssxkR61jMgto DCSYIIMINXSnaHrv1oQH0V7XwGw/p0p4YqhwdTf7zh8JHnwRNcyzWqPMPzXJ7iRv Y/hUb3xbtHY98oFtrWRmENDECywvAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAF/o qJMgbA8SkKbiZCHtYw6M4A/VBBNNKumlkbfkUZS9CnBLlMcclO3XZJUHa6FKvAtT tRp+8ZwSWSRfNnI0yjPuKEb9IeZSGQw9lGu9y3ahRX8we3HxhLY84KyvE4GcDm48 m4kZld6OnO9wrAeudEJHNVCINuwyGlUkCPJEV2f+Crtrp728r78q5N1ThGveHSoo ITgGelvYgxVlMW1hZwCeGmGFFaJMmuttWY40rCzVJE4A/zBNo9WAYxdSZax/9AqO VqSXUTmBrujLUgmzR7T9G+IE+fJ242PvkKpUmJYFBamRdhjtXZ5uiFCa9yzOXlS6 Fexi/12+rzUDsT8yPg4= -----END CERTIFICATE----- glance-12.0.0/glance/tests/var/testserver-bad-ovf.ova0000664000567000056710000002400012701407047023576 0ustar jenkinsjenkins00000000000000illegal-xml.ovf0000644000175000017500000000007612662226344012147 0ustar otcotc does not match <> testserver-disk1.vmdk0000644000175000017500000000000412562114301013301 0ustar otcotcABCDglance-12.0.0/glance/tests/var/ca.key0000664000567000056710000000325012701407047020446 0ustar jenkinsjenkins00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDcW4cRtw96/ZYs x3UB1jWWT0pAlsMQ03En7dueh9o4UZYChY2NMqTJ3gVqy1vf4wyRU1ROb/N5L4Kd QiJARH/ARbV+qrWoRvkcWBfg9w/4uZ9ZFhCBbaa2cAtTIGzVta6HP9UPeyfXrS+j gjqU2QN3bcc0ZCMAiQbtW7Vpw8RNr0NvTJDaSCzmpGQ7TQtB0jXm1nSG7FZUbojU CYB6TBGd01Cg8GzAai3ngXDq6foVJEwfmaV2Zapb0A4FLquXOzebskY5EL/okQGP ofSRCu/ar+HV4HN3+PgIIrfa8RhDDdlv6qE1iEuS6isSH1s+7BA2ZKfzT5t8G/8l 
SjKa/r2pAgMBAAECggEABeoS+v+906BAypzj4BO+xnUEWi1xuN7j951juqKM0dwm uZSaEwMb9ysVXCNvKNgwOypQZfaNQ2BqEgx3XOA5yZBVabvtOkIFZ6RZp7kZ3aQl yb9U3BR0WAsz0pxZL3c74vdsoYi9rgVA9ROGvP4CIM96fEZ/xgDnhbFjch5GA4u2 8XQ/kJUwLl0Uzxyo10sqGu3hgMwpM8lpaRW6d5EQ628rJEtA/Wmy5GpyCUhTD/5B jE1IzhjT4T5LqiPjA/Dsmz4Sa0+MyKRmA+zfSH6uS4szSaj53GVMHh4K+Xg2/EeD 6I3hGOtzZuYp5HBHE6J8VgeuErBQf32CCglHqN/dLQKBgQD4XaXa+AZtB10cRUV4 LZDB1AePJLloBhKikeTboZyhZEwbNuvw3JSQBAfUdpx3+8Na3Po1Tfy3DlZaVCU2 0PWh2UYrtwA3dymp8GCuSvnsLz1kNGv0Q7WEYaepyKRO8qHCjrTDUFuGVztU+H6O OWPHRd4DnyF3pKN7K4j6pU76HwKBgQDjIXylwPb6TD9ln13ijJ06t9l1E13dSS0B +9QU3f4abjMmW0K7icrNdmsjHafWLGXP2dxB0k4sx448buH+L8uLjC8G80wLQMSJ NAKpxIsmkOMpPUl80ks8bmzsqztmtql6kAgSwSW84vftJyNrFnp2kC2O4ZYGwz1+ 8rj3nBrfNwKBgQDrCJxCyoIyPUy0yy0BnIUnmAILSSKXuV97LvtXiOnTpTmMa339 8pA4dUf/nLtXpA3r98BkH0gu50d6tbR92mMI5bdM+SIgWwk3g33KkrNN+iproFwk zMqC23Mx7ejnuR6xIiEXz/y89eH0+C+zYcX1tz1xSe7+7PO0RK+dGkDR2wKBgHGR L+MtPhDfCSAF9IqvpnpSrR+2BEv+J8wDIAMjEMgka9z06sQc3NOpL17KmD4lyu6H z3L19fK8ASnEg6l2On9XI7iE9HP3+Y1k/SPny3AIKB1ZsKICAG6CBGK+J6BvGwTW ecLu4rC0iCUDWdlUzvzzkGQN9dcBzoDoWoYsft83AoGAAh4MyrM32gwlUgQD8/jX 8rsJlKnme0qMjX4A66caBomjztsH2Qt6cH7DIHx+hU75pnDAuEmR9xqnX7wFTR9Y 0j/XqTVsTjDINRLgMkrg7wIqKtWdicibBx1ER9LzwfNwht/ZFeMLdeUUUYMNv3cg cMSLxlxgFaUggYj/dsF6ypQ= -----END PRIVATE KEY----- glance-12.0.0/glance/tests/var/privatekey.key0000664000567000056710000000625312701407047022254 0ustar jenkinsjenkins00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAn0QTUd7pWvesMyoaTJGhc7zzptPmWa7o4jRoPvRAwaEaZZqj Z+ksuXmcALF8weae3ke/8cvyc9TDYv6CkG+0dcp+Vo+ZPQZRPED0/3SXTw3S5mZ2 jZe/ic7+steJcfKg2fUmfBp6vyuPcoDnH01KQKO5njP2VeBAKx5J5IxxnREyzyFB 4RMoxtb24LMmEG1bYx3D7tDEZmM4iWuPKsK9T+S8A4+i8lwdcxGce5M91qPRLc1k IyS8ZTxxIChgoOr+dw4dlTZ2recvHCdiVeOdEcH7Qz7lIaz9Dn49yUTSvW+Jfg/L iFRX/Y0hyDThRwEoD0WhfmAanEwMuME3LUarGJ7KSdN3t5I60n/K1QLxdYFmOVGq vNfwkSNp6HGuRHZeh1Trcvys/WAi4GrkrTe39uUktJUsJg51oOntV743QmQfAkkM vV10beby2lxUgvr8/zrkGnqpPD3utd8JDGnDUZJngHGbEIsg/6JexfKGoAZlHEL5 
kSRUKe1+7NtMe1TusSUbOFOuAbbFkx6jTRvoc0dQV+jsoIBTsTR0N5rBjBRkLhbd oS7TRT4sRmIgKpN6kkyyzGRHrWMyC2gMJJgggwg1dKdoeu/WhAfRXtfAbD+nSnhi qHB1N/vOHwkefBE1zLNao8w/NcnuJG9j+FRvfFu0dj3ygW2tZGYQ0MQLLC8CAwEA AQKCAgBL4IvvymqUu0CgE6P57LvlvxS522R4P7uV4W/05jtfxJgl5fmJzO5Q4x4u umB8pJn1vms1EHxPMQNxS1364C0ynSl5pepUx4i2UyAmAG8B680ZlaFPrgdD6Ykw vT0vO2/kx0XxhFAMef1aiQ0TvaftidMqCwmGOlN393Mu3rZWJVZ2lhqj15Pqv4lY 3iD5XJBYdVrekTmwqf7KgaLwtVyqDoiAjdMM8lPZeX965FhmxR8oWh0mHR9gf95J etMmdy6Km//+EbeS/HxWRnE0CD/RsQA7NmDFnXvmhsB6/j4EoHn5xB6ssbpGAxIg JwlY4bUrKXpaEgE7i4PYFb1q5asnTDdUZYAGAGXSBbDiUZM2YOe1aaFB/SA3Y3K2 47brnx7UXhAXSPJ16EZHejSeFbzZfWgj2J1t3DLk18Fpi/5AxxIy/N5J38kcP7xZ RIcSV1QEasYUrHI9buhuJ87tikDBDFEIIeLZxlyeIdwmKrQ7Vzny5Ls94Wg+2UtI XFLDak5SEugdp3LmmTJaugF+s/OiglBVhcaosoKRXb4K29M7mQv2huEAerFA14Bd dp2KByd8ue+fJrAiSxhAyMDAe/uv0ixnmBBtMH0YYHbfUIgl+kR1Ns/bxrJu7T7F kBQWZV4NRbSRB+RGOG2/Ai5jxu0uLu3gtHMO4XzzElWqzHEDoQKCAQEAzfaSRA/v 0831TDL8dmOCO61TQ9GtAa8Ouj+SdyTwk9f9B7NqQWg7qdkbQESpaDLvWYiftoDw mBFHLZe/8RHBaQpEAfbC/+DO6c7O+g1/0Cls33D5VaZOzFnnbHktT3r5xwkZfVBS aPPWl/IZOU8TtNqujQA+mmSnrJ7IuXSsBVq71xgBQT9JBZpUcjZ4eQducmtC43CP GqcSjq559ZKc/sa3PkAtNlKzSUS1abiMcJ86C9PgQ9gOu7y8SSqQ3ivZkVM99rxm wo8KehCcHOPOcIUQKmx4Bs4V3chm8rvygf3aanUHi83xaMeFtIIuOgAJmE9wGQeo k0UGvKBUDIenfwKCAQEAxfVFVxMBfI4mHrgTj/HOq7GMts8iykJK1PuELU6FZhex XOqXRbQ5dCLsyehrKlVPFqUENhXNHaOQrCOZxiVoRje2PfU/1fSqRaPxI7+W1Fsh Fq4PkdJ66NJZJkK5NHwE8SyQf+wpLdL3YhY5LM3tWdX5U9Rr6N8qelE3sLPssAak 1km4/428+rkp1BlCffr3FyL0KJmOYfMiAr8m6hRZWbhkvm5YqX1monxUrKdFJ218 dxzyniqoS1yU5RClY6783dql1UO4AvxpzpCPYDFIwbEb9zkUo0przhmi4KzyxknB /n/viMWzSnsM9YbakH6KunDTUteme1Dri3Drrq9TUQKCAQAVdvL7YOXPnxFHZbDl 7azu5ztcQAfVuxa/1kw/WnwwDDx0hwA13NUK+HNcmUtGbrh/DjwG2x032+UdHUmF qCIN/mHkCoF8BUPLHiB38tw1J3wPNUjm4jQoG96AcYiFVf2d/pbHdo2AHplosHRs go89M+UpELN1h7Ppy4qDuWMME86rtfa7hArqKJFQbdjUVC/wgLkx1tMzJeJLOGfB bgwqiS8jr7CGjsvcgOqfH/qS6iU0glpG98dhTWQaA/OhE9TSzmgQxMW41Qt0eTKr 2Bn1pAhxQ2im3Odue6ou9eNqJLiUi6nDqizUjKakj0SeCs71LqIyGZg58OGo2tSn 
kaOlAoIBAQCE/fO4vQcJpAJOLwLNePmM9bqAcoZ/9auKjPNO8OrEHPTGZMB+Tscu k+wa9a9RgICiyPgcUec8m0+tpjlAGo+EZRdlZqedWUMviCWQC74MKrD/KK9DG3IB ipfkEX2VmiBD2tm1Z3Z+17XlSuLci/iCmzNnM1XP3GYQSRIt/6Lq23vQjzTfU1z7 4HwOh23Zb0qjW5NG12sFuS9HQx6kskkY8r2UBlRAggP686Z7W+EkzPSKnYMN6cCo 6KkLf3RtlPlDHwq8TUOJlgSLhykbyeCEaDVOkSWhUnU8wJJheS+dMZ5IGbFWZOPA DQ02woOCAdG30ebXSBQL0uB8DL/52sYRAoIBAHtW3NomlxIMqWX8ZYRJIoGharx4 ikTOR/jeETb9t//n6kV19c4ICiXOQp062lwEqFvHkKzxKECFhJZuwFc09hVxUXxC LJjvDfauHWFHcrDTWWbd25CNeZ4Sq79GKf+HJ+Ov87WYcjuBFlCh8ES+2N4WZGCn B5oBq1g6E4p1k6xA5eE6VRiHPuFH8N9t1x6IlCZvZBhuVWdDrDd4qMSDEUTlcxSY mtcAIXTPaPcdb3CjdE5a38r59x7dZ/Te2K7FKETffjSmku7BrJITz3iXEk+sn8ex o3mdnFgeQ6/hxvMGgdK2qNb5ER/s0teFjnfnwHuTSXngMDIDb3kLL0ecWlQ= -----END RSA PRIVATE KEY----- glance-12.0.0/glance/tests/var/testserver.ova0000664000567000056710000005000012701407047022261 0ustar jenkinsjenkins00000000000000testserver.ovf0000644!00042560000003210712562113043014337 0ustar jjasekxintelall List of the virtual disks used in the package Logical networks used in the package Logical network used by this appliance. 
A virtual machine The kind of installed guest operating system Ubuntu_64 Ubuntu_64 Virtual hardware requirements for a virtual machine Virtual Hardware Family 0 testserver virtualbox-2.2 1 virtual CPU Number of virtual CPUs 1 virtual CPU 1 3 1 MegaBytes 512 MB of memory Memory Size 512 MB of memory 2 4 512 0 ideController0 IDE Controller ideController0 3 PIIX4 5 1 ideController1 IDE Controller ideController1 4 PIIX4 5 0 sataController0 SATA Controller sataController0 5 AHCI 20 0 usb USB Controller usb 6 23 3 false sound Sound Card sound 7 ensoniq1371 35 0 true cdrom1 CD-ROM Drive cdrom1 8 4 15 0 disk2 Disk Image disk2 /disk/vmdisk2 9 5 17 true Ethernet adapter on 'NAT' NAT Ethernet adapter on 'NAT' 10 E1000 10 DMTF:x86:64 DMTF:x86:VT-d Complete VirtualBox machine configuration in VirtualBox format testserver-disk1.vmdk0000644!00042560000000000412562114301015504 0ustar jjasekxintelallABCDglance-12.0.0/glance/tests/var/testserver-no-disk.ova0000664000567000056710000005000012701407047023623 0ustar jenkinsjenkins00000000000000testserver.ovf0000644!00042560000003130712561117144014345 0ustar jjasekxintelall List of the virtual disks used in the package Logical networks used in the package Logical network used by this appliance. 
A virtual machine The kind of installed guest operating system Ubuntu_64 Ubuntu_64 Virtual hardware requirements for a virtual machine Virtual Hardware Family 0 testserver virtualbox-2.2 1 virtual CPU Number of virtual CPUs 1 virtual CPU 1 3 1 MegaBytes 512 MB of memory Memory Size 512 MB of memory 2 4 512 0 ideController0 IDE Controller ideController0 3 PIIX4 5 1 ideController1 IDE Controller ideController1 4 PIIX4 5 0 sataController0 SATA Controller sataController0 5 AHCI 20 0 usb USB Controller usb 6 23 3 false sound Sound Card sound 7 ensoniq1371 35 0 true cdrom1 CD-ROM Drive cdrom1 8 4 15 0 disk2 Disk Image disk2 /disk/vmdisk2 9 5 17 true Ethernet adapter on 'NAT' NAT Ethernet adapter on 'NAT' 10 E1000 10 Complete VirtualBox machine configuration in VirtualBox format glance-12.0.0/glance/tests/var/testserver-no-ovf.ova0000664000567000056710000002400012701407047023464 0ustar jenkinsjenkins00000000000000testserver-disk1.vmdk0000644!00042560000000000512561140034015506 0ustar jjasekxintelallABCD glance-12.0.0/glance/tests/unit/0000775000567000056710000000000012701407204017533 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/test_glare_plugin_loader.py0000664000567000056710000001635212701407051025151 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock import pkg_resources from glance.common import exception from glance.common.glare import loader from glance.contrib.plugins.artifacts_sample.v1 import artifact as art1 from glance.contrib.plugins.artifacts_sample.v2 import artifact as art2 from glance.tests import utils class MyArtifactDuplicate(art1.MyArtifact): __type_version__ = '1.0.1' __type_name__ = 'MyArtifact' class MyArtifactOk(art1.MyArtifact): __type_version__ = '1.0.2' __type_name__ = 'MyArtifact' class TestArtifactsLoader(utils.BaseTestCase): def setUp(self): self.path = 'glance.contrib.plugins.artifacts_sample' self._setup_loader(['MyArtifact=%s.v1.artifact:MyArtifact' % self.path]) super(TestArtifactsLoader, self).setUp() def _setup_loader(self, artifacts): self.loader = None mock_this = 'stevedore.extension.ExtensionManager._find_entry_points' with mock.patch(mock_this) as fep: fep.return_value = [ pkg_resources.EntryPoint.parse(art) for art in artifacts] self.loader = loader.ArtifactsPluginLoader( 'glance.artifacts.types') def test_load(self): """ Plugins can be loaded as entrypoint=sigle plugin and entrypoint=[a, list, of, plugins] """ # single version self.assertEqual(1, len(self.loader.mgr.extensions)) self.assertEqual(art1.MyArtifact, self.loader.get_class_by_endpoint('myartifact')) # entrypoint = [a, list] path = os.path.splitext(__file__)[0][__file__.rfind( 'glance'):].replace('/', '.') self._setup_loader([ 'MyArtifact=%s:MyArtifactOk' % path, 'MyArtifact=%s.v2.artifact:MyArtifact' % self.path, 'MyArtifact=%s.v1.artifact:MyArtifact' % self.path]), self.assertEqual(3, len(self.loader.mgr.extensions)) # returns the plugin with the latest version self.assertEqual(art2.MyArtifact, self.loader.get_class_by_endpoint('myartifact')) self.assertEqual(art1.MyArtifact, self.loader.get_class_by_endpoint('myartifact', '1.0.1')) def test_basic_loader_func(self): """Test public methods of PluginLoader class here""" # type_version 2 == 2.0 == 2.0.0 self._setup_loader( 
['MyArtifact=%s.v2.artifact:MyArtifact' % self.path]) self.assertEqual(art2.MyArtifact, self.loader.get_class_by_endpoint('myartifact')) self.assertEqual(art2.MyArtifact, self.loader.get_class_by_endpoint('myartifact', '2.0')) self.assertEqual(art2.MyArtifact, self.loader.get_class_by_endpoint('myartifact', '2.0.0')) self.assertEqual(art2.MyArtifact, self.loader.get_class_by_endpoint('myartifact', '2')) # now make sure that get_class_by_typename works as well self.assertEqual(art2.MyArtifact, self.loader.get_class_by_typename('MyArtifact')) self.assertEqual(art2.MyArtifact, self.loader.get_class_by_typename('MyArtifact', '2')) def test_config_validation(self): """ Plugins can be loaded on certain conditions: * entry point name == type_name * no plugin with the same type_name and version has been already loaded """ path = 'glance.contrib.plugins.artifacts_sample' # here artifacts specific validation is checked self.assertRaises(exception.ArtifactNonMatchingTypeName, self._setup_loader, ['non_matching_name=%s.v1.artifact:MyArtifact' % path]) # make sure this call is ok self._setup_loader(['MyArtifact=%s.v1.artifact:MyArtifact' % path]) art_type = self.loader.get_class_by_endpoint('myartifact') self.assertEqual('MyArtifact', art_type.metadata.type_name) self.assertEqual('1.0.1', art_type.metadata.type_version) # now try to add duplicate artifact with the same type_name and # type_version as already exists bad_art_path = os.path.splitext(__file__)[0][__file__.rfind( 'glance'):].replace('/', '.') self.assertEqual(art_type.metadata.type_version, MyArtifactDuplicate.metadata.type_version) self.assertEqual(art_type.metadata.type_name, MyArtifactDuplicate.metadata.type_name) # should raise an exception as (name, version) is not unique self.assertRaises( exception.ArtifactDuplicateNameTypeVersion, self._setup_loader, ['MyArtifact=%s.v1.artifact:MyArtifact' % path, 'MyArtifact=%s:MyArtifactDuplicate' % bad_art_path]) # two artifacts with the same name but different versions 
coexist fine self.assertEqual('MyArtifact', MyArtifactOk.metadata.type_name) self.assertNotEqual(art_type.metadata.type_version, MyArtifactOk.metadata.type_version) self._setup_loader(['MyArtifact=%s.v1.artifact:MyArtifact' % path, 'MyArtifact=%s:MyArtifactOk' % bad_art_path]) def test_check_function(self): """ A test to show that plugin-load specific options in artifacts.conf are correctly processed: * no plugins can be loaded if load_enabled = False * if available_plugins list is given only plugins specified can be be loaded """ self.config(load_enabled=False) self.assertRaises(exception.ArtifactLoadError, self._setup_loader, ['MyArtifact=%s.v1.artifact:MyArtifact' % self.path]) self.config(load_enabled=True, available_plugins=['MyArtifact-1.0.2']) self.assertRaises(exception.ArtifactLoadError, self._setup_loader, ['MyArtifact=%s.v1.artifact:MyArtifact' % self.path]) path = os.path.splitext(__file__)[0][__file__.rfind( 'glance'):].replace('/', '.') self._setup_loader(['MyArtifact=%s:MyArtifactOk' % path]) # make sure that plugin_map has the expected plugin self.assertEqual(MyArtifactOk, self.loader.get_class_by_endpoint('myartifact', '1.0.2')) glance-12.0.0/glance/tests/unit/test_misc.py0000664000567000056710000000560112701407047022106 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import six # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.common import crypt from glance.common import utils from glance.tests import utils as test_utils class UtilsTestCase(test_utils.BaseTestCase): def test_encryption(self): # Check that original plaintext and unencrypted ciphertext match # Check keys of the three allowed lengths key_list = ["1234567890abcdef", "12345678901234567890abcd", "1234567890abcdef1234567890ABCDEF"] plaintext_list = [''] blocksize = 64 for i in range(3 * blocksize): text = os.urandom(i) if six.PY3: text = text.decode('latin1') plaintext_list.append(text) for key in key_list: for plaintext in plaintext_list: ciphertext = crypt.urlsafe_encrypt(key, plaintext, blocksize) self.assertIsInstance(ciphertext, str) self.assertNotEqual(ciphertext, plaintext) text = crypt.urlsafe_decrypt(key, ciphertext) self.assertIsInstance(text, str) self.assertEqual(plaintext, text) def test_empty_metadata_headers(self): """Ensure unset metadata is not encoded in HTTP headers""" metadata = { 'foo': 'bar', 'snafu': None, 'bells': 'whistles', 'unset': None, 'empty': '', 'properties': { 'distro': '', 'arch': None, 'user': 'nobody', }, } headers = utils.image_meta_to_http_headers(metadata) self.assertNotIn('x-image-meta-snafu', headers) self.assertNotIn('x-image-meta-uset', headers) self.assertNotIn('x-image-meta-snafu', headers) self.assertNotIn('x-image-meta-property-arch', headers) self.assertEqual('bar', headers.get('x-image-meta-foo')) self.assertEqual('whistles', headers.get('x-image-meta-bells')) self.assertEqual('', headers.get('x-image-meta-empty')) self.assertEqual('', headers.get('x-image-meta-property-distro')) self.assertEqual('nobody', headers.get('x-image-meta-property-user')) glance-12.0.0/glance/tests/unit/test_glare_type_definition_framework.py0000664000567000056710000012632112701407047027576 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import functools import mock import six import glance.common.exception as exc from glance.common.glare import declarative import glance.common.glare.definitions as defs from glance.common.glare import serialization import glance.tests.utils as test_utils BASE = declarative.get_declarative_base() class TestDeclarativeProperties(test_utils.BaseTestCase): def test_artifact_type_properties(self): class SomeTypeWithNoExplicitName(BASE): some_attr = declarative.AttributeDefinition() class InheritedType(SomeTypeWithNoExplicitName): __type_version__ = '1.0' __type_name__ = 'ExplicitName' __type_description__ = 'Type description' __type_display_name__ = 'EXPLICIT_NAME' __endpoint__ = 'some_endpoint' some_attr = declarative.AttributeDefinition(display_name='NAME') base_type = SomeTypeWithNoExplicitName base_instance = SomeTypeWithNoExplicitName() self.assertIsNotNone(base_type.metadata) self.assertIsNotNone(base_instance.metadata) self.assertEqual(base_type.metadata, base_instance.metadata) self.assertEqual("SomeTypeWithNoExplicitName", base_type.metadata.type_name) self.assertEqual("SomeTypeWithNoExplicitName", base_type.metadata.type_display_name) self.assertEqual("1.0", base_type.metadata.type_version) self.assertIsNone(base_type.metadata.type_description) self.assertEqual('sometypewithnoexplicitname', base_type.metadata.endpoint) self.assertIsNone(base_instance.some_attr) self.assertIsNotNone(base_type.some_attr) 
self.assertEqual(base_type.some_attr, base_instance.metadata.attributes.all['some_attr']) self.assertEqual('some_attr', base_type.some_attr.name) self.assertEqual('some_attr', base_type.some_attr.display_name) self.assertIsNone(base_type.some_attr.description) derived_type = InheritedType derived_instance = InheritedType() self.assertIsNotNone(derived_type.metadata) self.assertIsNotNone(derived_instance.metadata) self.assertEqual(derived_type.metadata, derived_instance.metadata) self.assertEqual('ExplicitName', derived_type.metadata.type_name) self.assertEqual('EXPLICIT_NAME', derived_type.metadata.type_display_name) self.assertEqual('1.0', derived_type.metadata.type_version) self.assertEqual('Type description', derived_type.metadata.type_description) self.assertEqual('some_endpoint', derived_type.metadata.endpoint) self.assertIsNone(derived_instance.some_attr) self.assertIsNotNone(derived_type.some_attr) self.assertEqual(derived_type.some_attr, derived_instance.metadata.attributes.all['some_attr']) self.assertEqual('some_attr', derived_type.some_attr.name) self.assertEqual('NAME', derived_type.some_attr.display_name) def test_wrong_type_definition(self): def declare_wrong_type_version(): class WrongType(BASE): __type_version__ = 'abc' # not a semver return WrongType def declare_wrong_type_name(): class WrongType(BASE): __type_name__ = 'a' * 256 # too long return WrongType self.assertRaises(exc.InvalidArtifactTypeDefinition, declare_wrong_type_version) self.assertRaises(exc.InvalidArtifactTypeDefinition, declare_wrong_type_name) def test_base_declarative_attributes(self): class TestType(BASE): defaulted = declarative.PropertyDefinition(default=42) read_only = declarative.PropertyDefinition(readonly=True) required_attr = declarative.PropertyDefinition(required=True) e = self.assertRaises(exc.InvalidArtifactPropertyValue, TestType) self.assertEqual('required_attr', e.name) self.assertIsNone(e.value) tt = TestType(required_attr="universe") self.assertEqual('universe', 
tt.required_attr) self.assertEqual(42, tt.defaulted) self.assertIsNone(tt.read_only) tt = TestType(required_attr="universe", defaulted=0, read_only="Hello") self.assertEqual(0, tt.defaulted) self.assertEqual("Hello", tt.read_only) tt.defaulted = 5 self.assertEqual(5, tt.defaulted) tt.required_attr = 'Foo' self.assertEqual('Foo', tt.required_attr) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'read_only', 'some_val') self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'required_attr', None) # no type checks in base AttributeDefinition o = object() tt.required_attr = o self.assertEqual(o, tt.required_attr) def test_generic_property(self): class TestType(BASE): simple_prop = declarative.PropertyDefinition() immutable_internal = declarative.PropertyDefinition(mutable=False, internal=True) prop_with_allowed = declarative.PropertyDefinition( allowed_values=["Foo", True, 42]) class DerivedType(TestType): prop_with_allowed = declarative.PropertyDefinition( allowed_values=["Foo", True, 42], required=True, default=42) tt = TestType() self.assertEqual(True, tt.metadata.attributes.all['simple_prop'].mutable) self.assertEqual(False, tt.metadata.attributes.all['simple_prop'].internal) self.assertEqual(False, tt.metadata.attributes.all[ 'immutable_internal'].mutable) self.assertEqual(True, tt.metadata.attributes.all[ 'immutable_internal'].internal) self.assertIsNone(tt.prop_with_allowed) tt = TestType(prop_with_allowed=42) self.assertEqual(42, tt.prop_with_allowed) tt = TestType(prop_with_allowed=True) self.assertEqual(True, tt.prop_with_allowed) tt = TestType(prop_with_allowed='Foo') self.assertEqual('Foo', tt.prop_with_allowed) tt.prop_with_allowed = 42 self.assertEqual(42, tt.prop_with_allowed) tt.prop_with_allowed = 'Foo' self.assertEqual('Foo', tt.prop_with_allowed) tt.prop_with_allowed = True self.assertEqual(True, tt.prop_with_allowed) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'prop_with_allowed', 'bar') # ensure that 
wrong assignment didn't change the value self.assertEqual(True, tt.prop_with_allowed) self.assertRaises(exc.InvalidArtifactPropertyValue, TestType, prop_with_allowed=False) dt = DerivedType() self.assertEqual(42, dt.prop_with_allowed) def test_default_violates_allowed(self): def declare_wrong_type(): class WrongType(BASE): prop = declarative.PropertyDefinition( allowed_values=['foo', 'bar'], default='baz') return WrongType self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_type) def test_string_property(self): class TestType(BASE): simple = defs.String() with_length = defs.String(max_length=10, min_length=5) with_pattern = defs.String(pattern='^\\d+$', default='42') tt = TestType() tt.simple = 'foo' self.assertEqual('foo', tt.simple) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'simple', 42) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'simple', 'x' * 256) self.assertRaises(exc.InvalidArtifactPropertyValue, TestType, simple='x' * 256) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'with_length', 'x' * 11) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'with_length', 'x' * 4) tt.simple = 'x' * 5 self.assertEqual('x' * 5, tt.simple) tt.simple = 'x' * 10 self.assertEqual('x' * 10, tt.simple) self.assertEqual("42", tt.with_pattern) tt.with_pattern = '0' self.assertEqual('0', tt.with_pattern) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'with_pattern', 'abc') self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'with_pattern', '.123.') def test_binary_object_mutable(self): def declare_blob(mutable): class BLOB(BASE): prop = defs.BinaryObject(mutable=mutable) return BLOB blob = declare_blob(False)() self.assertFalse(blob.metadata.attributes.all['prop'].mutable) self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, functools.partial(declare_blob, True)) def test_default_and_allowed_violates_string_constrains(self): def 
declare_wrong_default(): class WrongType(BASE): prop = defs.String(min_length=4, default='foo') return WrongType def declare_wrong_allowed(): class WrongType(BASE): prop = defs.String(min_length=4, allowed_values=['foo', 'bar']) return WrongType self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_default) self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_allowed) def test_integer_property(self): class TestType(BASE): simple = defs.Integer() constrained = defs.Integer(min_value=10, max_value=50) tt = TestType() self.assertIsNone(tt.simple) self.assertIsNone(tt.constrained) tt.simple = 0 tt.constrained = 10 self.assertEqual(0, tt.simple) self.assertEqual(10, tt.constrained) tt.constrained = 50 self.assertEqual(50, tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'constrained', 1) self.assertEqual(50, tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'constrained', 51) self.assertEqual(50, tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'simple', '11') self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'simple', 10.5) def test_default_and_allowed_violates_int_constrains(self): def declare_wrong_default(): class WrongType(BASE): prop = defs.Integer(min_value=4, default=1) return WrongType def declare_wrong_allowed(): class WrongType(BASE): prop = defs.Integer(min_value=4, max_value=10, allowed_values=[1, 15]) return WrongType self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_default) self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_allowed) def test_numeric_values(self): class TestType(BASE): simple = defs.Numeric() constrained = defs.Numeric(min_value=3.14, max_value=4.1) tt = TestType(simple=0.1, constrained=4) self.assertEqual(0.1, tt.simple) self.assertEqual(4.0, tt.constrained) tt.simple = 1 self.assertEqual(1, tt.simple) tt.constrained = 3.14 
self.assertEqual(3.14, tt.constrained) tt.constrained = 4.1 self.assertEqual(4.1, tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'simple', 'qwerty') self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'constrained', 3) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, 'constrained', 5) def test_default_and_allowed_violates_numeric_constrains(self): def declare_wrong_default(): class WrongType(BASE): prop = defs.Numeric(min_value=4.0, default=1.1) return WrongType def declare_wrong_allowed(): class WrongType(BASE): prop = defs.Numeric(min_value=4.0, max_value=10.0, allowed_values=[1.0, 15.5]) return WrongType self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_default) self.assertRaises(exc.InvalidArtifactTypePropertyDefinition, declare_wrong_allowed) def test_same_item_type_array(self): class TestType(BASE): simple = defs.Array() unique = defs.Array(unique=True) simple_with_allowed_values = defs.Array( defs.String(allowed_values=["Foo", "Bar"])) defaulted = defs.Array(defs.Boolean(), default=[True, False]) constrained = defs.Array(item_type=defs.Numeric(min_value=0), min_size=3, max_size=5, unique=True) tt = TestType(simple=[]) self.assertEqual([], tt.simple) tt.simple.append("Foo") self.assertEqual(["Foo"], tt.simple) tt.simple.append("Foo") self.assertEqual(["Foo", "Foo"], tt.simple) self.assertEqual(2, len(tt.simple)) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple.append, 42) tt.simple.pop(1) self.assertEqual(["Foo"], tt.simple) del tt.simple[0] self.assertEqual(0, len(tt.simple)) tt.simple_with_allowed_values = ["Foo"] tt.simple_with_allowed_values.insert(0, "Bar") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple_with_allowed_values.append, "Baz") self.assertEqual([True, False], tt.defaulted) tt.defaulted.pop() self.assertEqual([True], tt.defaulted) tt2 = TestType() self.assertEqual([True, False], tt2.defaulted) self.assertIsNone(tt.constrained) 
tt.constrained = [10, 5, 4] self.assertEqual([10, 5, 4], tt.constrained) tt.constrained[1] = 15 self.assertEqual([10, 15, 4], tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained.__setitem__, 1, -5) self.assertEqual([10, 15, 4], tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained.remove, 15) self.assertEqual([10, 15, 4], tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained.__delitem__, 1) self.assertEqual([10, 15, 4], tt.constrained) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained.append, 15) self.assertEqual([10, 15, 4], tt.constrained) tt.unique = [] tt.unique.append("foo") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.unique.append, "foo") def test_tuple_style_array(self): class TestType(BASE): address = defs.Array( item_type=[defs.String(20), defs.Integer(min_value=1), defs.Boolean()]) tt = TestType(address=["Hope Street", 1234, True]) self.assertEqual("Hope Street", tt.address[0]) self.assertEqual(1234, tt.address[1]) self.assertEqual(True, tt.address[2]) # On Python 3, sort() fails because int (1) and string ("20") are not # comparable if six.PY2: self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.sort) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 0) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop, 1) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.pop) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.address.append, "Foo") def test_same_item_type_dict(self): class TestType(BASE): simple_props = defs.Dict() constrained_props = defs.Dict( properties=defs.Integer(min_value=1, allowed_values=[1, 2]), min_properties=2, max_properties=3) tt = TestType() self.assertIsNone(tt.simple_props) self.assertIsNone(tt.constrained_props) tt.simple_props = {} self.assertEqual({}, tt.simple_props) tt.simple_props["foo"] = "bar" self.assertEqual({"foo": "bar"}, tt.simple_props) 
self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple_props.__setitem__, 42, "foo") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.simple_props.setdefault, "bar", 42) tt.constrained_props = {"foo": 1, "bar": 2} self.assertEqual({"foo": 1, "bar": 2}, tt.constrained_props) tt.constrained_props["baz"] = 1 self.assertEqual({"foo": 1, "bar": 2, "baz": 1}, tt.constrained_props) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained_props.__setitem__, "foo", 3) self.assertEqual(1, tt.constrained_props["foo"]) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained_props.__setitem__, "qux", 2) tt.constrained_props.pop("foo") self.assertEqual({"bar": 2, "baz": 1}, tt.constrained_props) tt.constrained_props['qux'] = 2 self.assertEqual({"qux": 2, "bar": 2, "baz": 1}, tt.constrained_props) tt.constrained_props.popitem() dict_copy = tt.constrained_props.copy() self.assertRaises(exc.InvalidArtifactPropertyValue, tt.constrained_props.popitem) self.assertEqual(dict_copy, tt.constrained_props) def test_composite_dict(self): class TestType(BASE): props = defs.Dict(properties={"foo": defs.String(), "bar": defs.Boolean()}) fixed = defs.Dict(properties={"name": defs.String(min_length=2), "age": defs.Integer(min_value=0, max_value=99)}) tt = TestType() tt.props = {"foo": "FOO", "bar": False} self.assertRaises(exc.InvalidArtifactPropertyValue, tt.props.__setitem__, "bar", 123) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.props.__setitem__, "extra", "value") tt.fixed = {"name": "Alex", "age": 42} self.assertRaises(exc.InvalidArtifactPropertyValue, tt.fixed.__setitem__, "age", 120) def test_immutables(self): class TestType(BASE): activated = defs.Boolean(required=True, default=False) name = defs.String(mutable=False) def __is_mutable__(self): return not self.activated tt = TestType() self.assertEqual(False, tt.activated) self.assertIsNone(tt.name) tt.name = "Foo" self.assertEqual("Foo", tt.name) tt.activated = True 
self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, "name", "Bar") self.assertEqual("Foo", tt.name) tt.activated = False tt.name = "Bar" self.assertEqual("Bar", tt.name) def test_readonly_array_dict(self): class TestType(BASE): arr = defs.Array(readonly=True) dict = defs.Dict(readonly=True) tt = TestType(arr=["Foo", "Bar"], dict={"qux": "baz"}) self.assertEqual(["Foo", "Bar"], tt.arr) self.assertEqual({"qux": "baz"}, tt.dict) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert, 0, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__, 0, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove, "Foo") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop, "qux") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.__setitem__, "qux", "foo") def test_mutable_array_dict(self): class TestType(BASE): arr = defs.Array(mutable=False) dict = defs.Dict(mutable=False) activated = defs.Boolean() def __is_mutable__(self): return not self.activated tt = TestType() tt.arr = [] tt.dict = {} tt.arr.append("Foo") tt.arr.insert(0, "Bar") tt.dict["baz"] = "qux" tt.activated = True self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.insert, 0, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.__setitem__, 0, "Baz") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.remove, "Foo") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.pop) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.pop, "qux") self.assertRaises(exc.InvalidArtifactPropertyValue, tt.dict.__setitem__, "qux", "foo") def test_readonly_as_write_once(self): class TestType(BASE): prop = defs.String(readonly=True) arr = defs.Array(readonly=True) tt = TestType() 
self.assertIsNone(tt.prop) tt.prop = "Foo" self.assertEqual("Foo", tt.prop) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt, "prop", "bar") tt2 = TestType() self.assertIsNone(tt2.prop) tt2.prop = None self.assertIsNone(tt2.prop) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2, "prop", None) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, tt2, "prop", "foo") self.assertIsNone(tt.arr) tt.arr = ["foo", "bar"] self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, 'baz') self.assertIsNone(tt2.arr) tt2.arr = None self.assertRaises(exc.InvalidArtifactPropertyValue, tt.arr.append, 'baz') class TestArtifactType(test_utils.BaseTestCase): def test_create_artifact(self): a = defs.ArtifactType(**get_artifact_fixture()) self.assertIsNotNone(a) self.assertEqual("123", a.id) self.assertEqual("ArtifactType", a.type_name) self.assertEqual("1.0", a.type_version) self.assertEqual("11.2", a.version) self.assertEqual("Foo", a.name) self.assertEqual("private", a.visibility) self.assertEqual("creating", a.state) self.assertEqual("my_tenant", a.owner) self.assertEqual(a.created_at, a.updated_at) self.assertIsNone(a.description) self.assertIsNone(a.published_at) self.assertIsNone(a.deleted_at) self.assertIsNone(a.description) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "id", "foo") self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "state", "active") self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "owner", "some other") self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "created_at", datetime.datetime.now()) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "deleted_at", datetime.datetime.now()) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "updated_at", datetime.datetime.now()) self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, a, "published_at", datetime.datetime.now()) self.assertRaises(exc.InvalidArtifactPropertyValue, 
setattr, a, "visibility", "wrong") def test_dependency_prop(self): class DerivedType(defs.ArtifactType): depends_on_any = defs.ArtifactReference() depends_on_self = defs.ArtifactReference(type_name='DerivedType') depends_on_self_version = defs.ArtifactReference( type_name='DerivedType', type_version='1.0') class DerivedTypeV11(DerivedType): __type_name__ = 'DerivedType' __type_version__ = '1.1' depends_on_self_version = defs.ArtifactReference( type_name='DerivedType', type_version='1.1') d1 = DerivedType(**get_artifact_fixture()) d2 = DerivedTypeV11(**get_artifact_fixture()) a = defs.ArtifactType(**get_artifact_fixture()) d1.depends_on_any = a self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1, 'depends_on_self', a) d1.depends_on_self = d2 d2.depends_on_self = d1 d1.depends_on_self_version = d1 d2.depends_on_self_version = d2 self.assertRaises(exc.InvalidArtifactPropertyValue, setattr, d1, 'depends_on_self_version', d2) def test_dependency_list(self): class FooType(defs.ArtifactType): pass class BarType(defs.ArtifactType): pass class TestType(defs.ArtifactType): depends_on = defs.ArtifactReferenceList() depends_on_self_or_foo = defs.ArtifactReferenceList( references=defs.ArtifactReference(['FooType', 'TestType'])) a = defs.ArtifactType(**get_artifact_fixture(id="1")) a_copy = defs.ArtifactType(**get_artifact_fixture(id="1")) b = defs.ArtifactType(**get_artifact_fixture(id="2")) tt = TestType(**get_artifact_fixture(id="3")) foo = FooType(**get_artifact_fixture(id='4')) bar = BarType(**get_artifact_fixture(id='4')) tt.depends_on.append(a) tt.depends_on.append(b) self.assertEqual([a, b], tt.depends_on) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.depends_on.append, a) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.depends_on.append, a_copy) tt.depends_on_self_or_foo.append(tt) tt.depends_on_self_or_foo.append(foo) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.depends_on_self_or_foo.append, bar) self.assertEqual([tt, foo], 
tt.depends_on_self_or_foo) def test_blob(self): class TestType(defs.ArtifactType): image_file = defs.BinaryObject(max_file_size=201054, min_locations=1, max_locations=5) screen_shots = defs.BinaryObjectList( objects=defs.BinaryObject(min_file_size=100), min_count=1) tt = TestType(**get_artifact_fixture()) blob = defs.Blob() blob.size = 1024 blob.locations.append("file://some.file.path") tt.image_file = blob self.assertEqual(1024, tt.image_file.size) self.assertEqual(["file://some.file.path"], tt.image_file.locations) def test_pre_publish_blob_validation(self): class TestType(defs.ArtifactType): required_blob = defs.BinaryObject(required=True) optional_blob = defs.BinaryObject() tt = TestType(**get_artifact_fixture()) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__) tt.required_blob = defs.Blob(size=0) tt.__pre_publish__() def test_pre_publish_dependency_validation(self): class TestType(defs.ArtifactType): required_dependency = defs.ArtifactReference(required=True) optional_dependency = defs.ArtifactReference() tt = TestType(**get_artifact_fixture()) self.assertRaises(exc.InvalidArtifactPropertyValue, tt.__pre_publish__) tt.required_dependency = defs.ArtifactType(**get_artifact_fixture()) tt.__pre_publish__() def test_default_value_of_immutable_field_in_active_state(self): class TestType(defs.ArtifactType): foo = defs.String(default='Bar', mutable=False) tt = TestType(**get_artifact_fixture(state='active')) self.assertEqual('Bar', tt.foo) class SerTestType(defs.ArtifactType): some_string = defs.String() some_text = defs.Text() some_version = defs.SemVerString() some_int = defs.Integer() some_numeric = defs.Numeric() some_bool = defs.Boolean() some_array = defs.Array() another_array = defs.Array( item_type=[defs.Integer(), defs.Numeric(), defs.Boolean()]) some_dict = defs.Dict() another_dict = defs.Dict( properties={'foo': defs.Integer(), 'bar': defs.Boolean()}) some_ref = defs.ArtifactReference() some_ref_list = defs.ArtifactReferenceList() 
some_blob = defs.BinaryObject() some_blob_list = defs.BinaryObjectList() class TestSerialization(test_utils.BaseTestCase): def test_serialization_to_db(self): ref1 = defs.ArtifactType(**get_artifact_fixture(id="1")) ref2 = defs.ArtifactType(**get_artifact_fixture(id="2")) ref3 = defs.ArtifactType(**get_artifact_fixture(id="3")) blob1 = defs.Blob(size=100, locations=['http://example.com/blob1'], item_key='some_key', checksum='abc') blob2 = defs.Blob(size=200, locations=['http://example.com/blob2'], item_key='another_key', checksum='fff') blob3 = defs.Blob(size=300, locations=['http://example.com/blob3'], item_key='third_key', checksum='123') fixture = get_artifact_fixture() tt = SerTestType(**fixture) tt.some_string = 'bar' tt.some_text = 'bazz' tt.some_version = '11.22.33-beta' tt.some_int = 50 tt.some_numeric = 10.341 tt.some_bool = True tt.some_array = ['q', 'w', 'e', 'r', 't', 'y'] tt.another_array = [1, 1.2, False] tt.some_dict = {'foobar': "FOOBAR", 'baz': "QUX"} tt.another_dict = {'foo': 1, 'bar': True} tt.some_ref = ref1 tt.some_ref_list = [ref2, ref3] tt.some_blob = blob1 tt.some_blob_list = [blob2, blob3] results = serialization.serialize_for_db(tt) expected = fixture expected['type_name'] = 'SerTestType' expected['type_version'] = '1.0' expected['properties'] = { 'some_string': { 'type': 'string', 'value': 'bar' }, 'some_text': { 'type': 'text', 'value': 'bazz' }, 'some_version': { 'type': 'string', 'value': '11.22.33-beta' }, 'some_int': { 'type': 'int', 'value': 50 }, 'some_numeric': { 'type': 'numeric', 'value': 10.341 }, 'some_bool': { 'type': 'bool', 'value': True }, 'some_array': { 'type': 'array', 'value': [ { 'type': 'string', 'value': 'q' }, { 'type': 'string', 'value': 'w' }, { 'type': 'string', 'value': 'e' }, { 'type': 'string', 'value': 'r' }, { 'type': 'string', 'value': 't' }, { 'type': 'string', 'value': 'y' } ] }, 'another_array': { 'type': 'array', 'value': [ { 'type': 'int', 'value': 1 }, { 'type': 'numeric', 'value': 1.2 }, { 'type': 
'bool', 'value': False } ] }, 'some_dict.foobar': { 'type': 'string', 'value': 'FOOBAR' }, 'some_dict.baz': { 'type': 'string', 'value': 'QUX' }, 'another_dict.foo': { 'type': 'int', 'value': 1 }, 'another_dict.bar': { 'type': 'bool', 'value': True } } expected['dependencies'] = { 'some_ref': ['1'], 'some_ref_list': ['2', '3'] } expected['blobs'] = { 'some_blob': [ { 'size': 100, 'checksum': 'abc', 'item_key': 'some_key', 'locations': ['http://example.com/blob1'] }], 'some_blob_list': [ { 'size': 200, 'checksum': 'fff', 'item_key': 'another_key', 'locations': ['http://example.com/blob2'] }, { 'size': 300, 'checksum': '123', 'item_key': 'third_key', 'locations': ['http://example.com/blob3'] } ] } self.assertEqual(expected, results) def test_deserialize_from_db(self): ts = datetime.datetime.now() db_dict = { "type_name": 'SerTestType', "type_version": '1.0', "id": "123", "version": "11.2", "description": None, "name": "Foo", "visibility": "private", "state": "creating", "owner": "my_tenant", "created_at": ts, "updated_at": ts, "deleted_at": None, "published_at": None, "tags": ["test", "fixture"], "properties": { 'some_string': { 'type': 'string', 'value': 'bar' }, 'some_text': { 'type': 'text', 'value': 'bazz' }, 'some_version': { 'type': 'string', 'value': '11.22.33-beta' }, 'some_int': { 'type': 'int', 'value': 50 }, 'some_numeric': { 'type': 'numeric', 'value': 10.341 }, 'some_bool': { 'type': 'bool', 'value': True }, 'some_array': { 'type': 'array', 'value': [ { 'type': 'string', 'value': 'q' }, { 'type': 'string', 'value': 'w' }, { 'type': 'string', 'value': 'e' }, { 'type': 'string', 'value': 'r' }, { 'type': 'string', 'value': 't' }, { 'type': 'string', 'value': 'y' } ] }, 'another_array': { 'type': 'array', 'value': [ { 'type': 'int', 'value': 1 }, { 'type': 'numeric', 'value': 1.2 }, { 'type': 'bool', 'value': False } ] }, 'some_dict.foobar': { 'type': 'string', 'value': 'FOOBAR' }, 'some_dict.baz': { 'type': 'string', 'value': 'QUX' }, 'another_dict.foo': { 
'type': 'int', 'value': 1 }, 'another_dict.bar': { 'type': 'bool', 'value': True } }, 'blobs': { 'some_blob': [ { 'size': 100, 'checksum': 'abc', 'item_key': 'some_key', 'locations': ['http://example.com/blob1'] }], 'some_blob_list': [ { 'size': 200, 'checksum': 'fff', 'item_key': 'another_key', 'locations': ['http://example.com/blob2'] }, { 'size': 300, 'checksum': '123', 'item_key': 'third_key', 'locations': ['http://example.com/blob3'] } ] }, 'dependencies': { 'some_ref': [ { "type_name": 'ArtifactType', "type_version": '1.0', "id": "1", "version": "11.2", "description": None, "name": "Foo", "visibility": "private", "state": "creating", "owner": "my_tenant", "created_at": ts, "updated_at": ts, "deleted_at": None, "published_at": None, "tags": ["test", "fixture"], "properties": {}, "blobs": {}, "dependencies": {} } ], 'some_ref_list': [ { "type_name": 'ArtifactType', "type_version": '1.0', "id": "2", "version": "11.2", "description": None, "name": "Foo", "visibility": "private", "state": "creating", "owner": "my_tenant", "created_at": ts, "updated_at": ts, "deleted_at": None, "published_at": None, "tags": ["test", "fixture"], "properties": {}, "blobs": {}, "dependencies": {} }, { "type_name": 'ArtifactType', "type_version": '1.0', "id": "3", "version": "11.2", "description": None, "name": "Foo", "visibility": "private", "state": "creating", "owner": "my_tenant", "created_at": ts, "updated_at": ts, "deleted_at": None, "published_at": None, "tags": ["test", "fixture"], "properties": {}, "blobs": {}, "dependencies": {} } ] } } plugins_dict = {'SerTestType': [SerTestType], 'ArtifactType': [defs.ArtifactType]} def _retrieve_plugin(name, version): return next((p for p in plugins_dict.get(name, []) if version and p.version == version), plugins_dict.get(name, [None])[0]) plugins = mock.Mock() plugins.get_class_by_typename = _retrieve_plugin art = serialization.deserialize_from_db(db_dict, plugins) self.assertEqual('123', art.id) self.assertEqual('11.2', art.version) 
self.assertIsNone(art.description) self.assertEqual('Foo', art.name) self.assertEqual('private', art.visibility) self.assertEqual('private', art.visibility) def get_artifact_fixture(**kwargs): ts = datetime.datetime.now() fixture = { "id": "123", "version": "11.2", "description": None, "name": "Foo", "visibility": "private", "state": "creating", "owner": "my_tenant", "created_at": ts, "updated_at": ts, "deleted_at": None, "published_at": None, "tags": ["test", "fixture"] } fixture.update(kwargs) return fixture glance-12.0.0/glance/tests/unit/test_store_image.py0000664000567000056710000011772412701407047023463 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance_store import mock from debtcollector import removals from glance.common import exception from glance.common import signature_utils import glance.location from glance.tests.unit import base as unit_test_base from glance.tests.unit import utils as unit_test_utils from glance.tests import utils BASE_URI = 'http://storeurl.com/container' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '228c6da5-29cd-4d67-9457-ed632e083fc0' class ImageRepoStub(object): def add(self, image): return image def save(self, image, from_state=None): return image class ImageStub(object): def __init__(self, image_id, status=None, locations=None, visibility=None, extra_properties=None): self.image_id = image_id self.status = status self.locations = locations or [] self.visibility = visibility self.size = 1 self.extra_properties = extra_properties or {} def delete(self): self.status = 'deleted' def get_member_repo(self): return FakeMemberRepo(self, [TENANT1, TENANT2]) class ImageFactoryStub(object): def new_image(self, image_id=None, name=None, visibility='private', min_disk=0, min_ram=0, protected=False, owner=None, disk_format=None, container_format=None, extra_properties=None, tags=None, **other_args): return ImageStub(image_id, visibility=visibility, extra_properties=extra_properties, **other_args) class FakeMemberRepo(object): def __init__(self, image, tenants=None): self.image = image self.factory = glance.domain.ImageMemberFactory() self.tenants = tenants or [] def list(self, *args, **kwargs): return [self.factory.new_image_member(self.image, tenant) for tenant in self.tenants] def add(self, member): self.tenants.append(member.member_id) def remove(self, member): self.tenants.remove(member.member_id) class TestStoreImage(utils.BaseTestCase): def setUp(self): locations = [{'url': 
'%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}] self.image_stub = ImageStub(UUID1, 'active', locations) self.store_api = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) super(TestStoreImage, self).setUp() def test_image_delete(self): image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, self.store_utils) location = image.locations[0] self.assertEqual('active', image.status) self.store_api.get_from_backend(location['url'], context={}) image.delete() self.assertEqual('deleted', image.status) self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, location['url'], {}) def test_image_get_data(self): image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, self.store_utils) self.assertEqual('XXX', image.get_data()) def test_image_get_data_from_second_location(self): def fake_get_from_backend(self, location, offset=0, chunk_size=None, context=None): if UUID1 in location: raise Exception('not allow download from %s' % location) else: return self.data[location] image1 = glance.location.ImageProxy(self.image_stub, {}, self.store_api, self.store_utils) self.assertEqual('XXX', image1.get_data()) # Multiple location support context = glance.context.RequestContext(user=USER1) (image2, image_stub2) = self._add_image(context, UUID2, 'ZZZ', 3) location_data = image2.locations[0] image1.locations.append(location_data) self.assertEqual(2, len(image1.locations)) self.assertEqual(UUID2, location_data['url']) self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend', fake_get_from_backend) # This time, image1.get_data() returns the data wrapped in a # LimitingReader|CooperativeReader pipeline, so peeking under # the hood of those objects to get at the underlying string. 
self.assertEqual('ZZZ', image1.get_data().data.fd) image1.locations.pop(0) self.assertEqual(1, len(image1.locations)) image2.delete() def test_image_set_data(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) def test_image_set_data_location_metadata(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) loc_meta = {'key': 'value5032'} store_api = unit_test_utils.FakeStoreAPI(store_metadata=loc_meta) store_utils = unit_test_utils.FakeStoreUtils(store_api) image = glance.location.ImageProxy(image_stub, context, store_api, store_utils) image.set_data('YYYY', 4) self.assertEqual(4, image.size) location_data = image.locations[0] self.assertEqual(UUID2, location_data['url']) self.assertEqual(loc_meta, location_data['metadata']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) image.delete() self.assertEqual(image.status, 'deleted') self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, image.locations[0]['url'], {}) def test_image_set_data_unknown_size(self): context = glance.context.RequestContext(user=USER1) image_stub = ImageStub(UUID2, status='queued', locations=[]) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', None) self.assertEqual(4, image.size) # NOTE(markwash): FakeStore returns image_id for location self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) image.delete() self.assertEqual(image.status, 'deleted') 
self.assertRaises(glance_store.NotFound, self.store_api.get_from_backend, image.locations[0]['url'], context={}) @removals.remove(message="This will be removed in the N cycle.") def test_old_image_set_data_valid_signature(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'signature_certificate_uuid': 'UUID', 'signature_hash_method': 'METHOD', 'signature_key_type': 'TYPE', 'signature': 'VALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 'verify_signature', unit_test_utils.fake_old_verify_signature) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) self.assertEqual('active', image.status) @removals.remove(message="This will be removed in the N cycle.") def test_old_image_set_data_invalid_signature(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'signature_certificate_uuid': 'UUID', 'signature_hash_method': 'METHOD', 'signature_key_type': 'TYPE', 'signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 'verify_signature', unit_test_utils.fake_old_verify_signature) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) self.assertRaises(exception.SignatureVerificationError, image.set_data, 'YYYY', 4) @removals.remove(message="This will be removed in the N cycle.") def test_old_image_set_data_invalid_signature_missing_metadata(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'signature_hash_method': 'METHOD', 'signature_key_type': 'TYPE', 'signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 'verify_signature', unit_test_utils.fake_old_verify_signature) 
image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) # Image is still active, since invalid signature was ignored self.assertEqual('active', image.status) @mock.patch('glance.location.LOG') def test_image_set_data_valid_signature(self, mock_log): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'VALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual('active', image.status) mock_log.info.assert_called_once_with( u'Successfully verified signature for image %s', UUID2) def test_image_set_data_invalid_signature(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_certificate_uuid': 'UUID', 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) self.assertRaises(exception.SignatureVerificationError, image.set_data, 'YYYY', 4) def test_image_set_data_invalid_signature_missing_metadata(self): context = glance.context.RequestContext(user=USER1) extra_properties = { 'img_signature_hash_method': 'METHOD', 'img_signature_key_type': 'TYPE', 'img_signature': 'INVALID' } image_stub = ImageStub(UUID2, status='queued', extra_properties=extra_properties) self.stubs.Set(signature_utils, 
'get_verifier', unit_test_utils.fake_get_verifier) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data('YYYY', 4) self.assertEqual(UUID2, image.locations[0]['url']) self.assertEqual('Z', image.checksum) # Image is still active, since invalid signature was ignored self.assertEqual('active', image.status) def _add_image(self, context, image_id, data, len): image_stub = ImageStub(image_id, status='queued', locations=[]) image = glance.location.ImageProxy(image_stub, context, self.store_api, self.store_utils) image.set_data(data, len) self.assertEqual(len, image.size) # NOTE(markwash): FakeStore returns image_id for location location = {'url': image_id, 'metadata': {}, 'status': 'active'} self.assertEqual([location], image.locations) self.assertEqual([location], image_stub.locations) self.assertEqual('active', image.status) return (image, image_stub) def test_image_change_append_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.append, location_bad) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_append_invalid_location_metatdata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) # Using only one test rule here is enough to make sure # 'store.check_location_metadata()' can be triggered # in Location proxy layer. 
Complete test rule for # 'store.check_location_metadata()' testing please # check below cases within 'TestStoreMetaDataChecker'. location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.append, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_append_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.append(location3) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_pop_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.append(location3) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], image1.locations) image1.locations.pop() self.assertEqual([location2], image_stub1.locations) self.assertEqual([location2], 
image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_extend_invalid_locations_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.extend, [location_bad]) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_extend_invalid_locations_metadata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.extend, [location_bad]) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_extend_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.extend([location3]) self.assertEqual([location2, location3], image_stub1.locations) self.assertEqual([location2, location3], 
image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_remove_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} location_bad = {'url': 'unknown://location', 'metadata': {}} image1.locations.extend([location3]) image1.locations.remove(location2) self.assertEqual([location3], image_stub1.locations) self.assertEqual([location3], image1.locations) self.assertRaises(ValueError, image1.locations.remove, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_delete_location(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) del image1.locations[0] self.assertEqual([], image_stub1.locations) self.assertEqual(0, len(image1.locations)) self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) image1.delete() def test_image_change_insert_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.insert, 0, location_bad) image1.delete() 
self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_insert_invalid_location_metadata(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location_bad = {'url': UUID3, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image1.locations.insert, 0, location_bad) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_insert_location(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}, 'status': 'active'} location3 = {'url': UUID3, 'metadata': {}, 'status': 'active'} image1.locations.insert(0, location3) self.assertEqual([location3, location2], image_stub1.locations) self.assertEqual([location3, location2], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image2.delete() def test_image_change_delete_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': 
UUID3, 'metadata': {}} image1.locations.insert(0, location3) del image1.locations[0:100] self.assertEqual([], image_stub1.locations) self.assertEqual(0, len(image1.locations)) self.assertRaises(exception.BadStoreUri, image1.locations.insert, 0, location2) self.assertRaises(exception.BadStoreUri, image2.locations.insert, 0, location3) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) def test_image_change_adding_invalid_location_uri(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) image_stub1 = ImageStub('fake_image_id', status='queued', locations=[]) image1 = glance.location.ImageProxy(image_stub1, context, self.store_api, self.store_utils) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, image1.locations.__iadd__, [location_bad]) self.assertEqual([], image_stub1.locations) self.assertEqual([], image1.locations) image1.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def test_image_change_adding_invalid_location_metadata(self): self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) image_stub2 = ImageStub('fake_image_id', status='queued', locations=[]) image2 = glance.location.ImageProxy(image_stub2, context, self.store_api, self.store_utils) location_bad = {'url': UUID2, 'metadata': b"a invalid metadata"} self.assertRaises(glance_store.BackendException, image2.locations.__iadd__, [location_bad]) self.assertEqual([], image_stub2.locations) self.assertEqual([], image2.locations) image1.delete() image2.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) def 
test_image_change_adding_locations(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image3.locations += [location2, location3] self.assertEqual([location2, location3], image_stub3.locations) self.assertEqual([location2, location3], image3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_get_location_index(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image3.locations += [location2, location3] self.assertEqual(1, image_stub3.locations.index(location3)) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_get_location_by_index(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = 
glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image3.locations += [location2, location3] self.assertEqual(1, image_stub3.locations.index(location3)) self.assertEqual(location2, image_stub3.locations[0]) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_checking_location_exists(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} location_bad = {'url': 'unknown://location', 'metadata': {}} image3.locations += [location2, location3] self.assertIn(location3, image_stub3.locations) self.assertNotIn(location_bad, image_stub3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() def test_image_reverse_locations_order(self): UUID3 = 'a8a61ec4-d7a3-11e2-8c28-000c29c27581' self.assertEqual(2, len(self.store_api.data.keys())) context = glance.context.RequestContext(user=USER1) (image1, 
image_stub1) = self._add_image(context, UUID2, 'XXXX', 4) (image2, image_stub2) = self._add_image(context, UUID3, 'YYYY', 4) location2 = {'url': UUID2, 'metadata': {}} location3 = {'url': UUID3, 'metadata': {}} image_stub3 = ImageStub('fake_image_id', status='queued', locations=[]) image3 = glance.location.ImageProxy(image_stub3, context, self.store_api, self.store_utils) image3.locations += [location2, location3] image_stub3.locations.reverse() self.assertEqual([location3, location2], image_stub3.locations) self.assertEqual([location3, location2], image3.locations) image3.delete() self.assertEqual(2, len(self.store_api.data.keys())) self.assertNotIn(UUID2, self.store_api.data.keys()) self.assertNotIn(UUID3, self.store_api.data.keys()) image1.delete() image2.delete() class TestStoreImageRepo(utils.BaseTestCase): def setUp(self): super(TestStoreImageRepo, self).setUp() self.store_api = unit_test_utils.FakeStoreAPI() store_utils = unit_test_utils.FakeStoreUtils(self.store_api) self.image_stub = ImageStub(UUID1) self.image = glance.location.ImageProxy(self.image_stub, {}, self.store_api, store_utils) self.image_repo_stub = ImageRepoStub() self.image_repo = glance.location.ImageRepoProxy(self.image_repo_stub, {}, self.store_api, store_utils) patcher = mock.patch("glance.location._get_member_repo_for_store", self.get_fake_member_repo) patcher.start() self.addCleanup(patcher.stop) self.fake_member_repo = FakeMemberRepo(self.image, [TENANT1, TENANT2]) self.image_member_repo = glance.location.ImageMemberRepoProxy( self.fake_member_repo, self.image, {}, self.store_api) def get_fake_member_repo(self, image, context, db_api, store_api): return FakeMemberRepo(self.image, [TENANT1, TENANT2]) def test_add_updates_acls(self): self.image_stub.locations = [{'url': 'foo', 'metadata': {}, 'status': 'active'}, {'url': 'bar', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'public' self.image_repo.add(self.image) self.assertTrue(self.store_api.acls['foo']['public']) 
self.assertEqual([], self.store_api.acls['foo']['read']) self.assertEqual([], self.store_api.acls['foo']['write']) self.assertTrue(self.store_api.acls['bar']['public']) self.assertEqual([], self.store_api.acls['bar']['read']) self.assertEqual([], self.store_api.acls['bar']['write']) def test_add_ignores_acls_if_no_locations(self): self.image_stub.locations = [] self.image_stub.visibility = 'public' self.image_repo.add(self.image) self.assertEqual(0, len(self.store_api.acls)) def test_save_updates_acls(self): self.image_stub.locations = [{'url': 'foo', 'metadata': {}, 'status': 'active'}] self.image_repo.save(self.image) self.assertIn('foo', self.store_api.acls) def test_add_fetches_members_if_private(self): self.image_stub.locations = [{'url': 'glue', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' self.image_repo.add(self.image) self.assertIn('glue', self.store_api.acls) acls = self.store_api.acls['glue'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2], acls['read']) def test_save_fetches_members_if_private(self): self.image_stub.locations = [{'url': 'glue', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' self.image_repo.save(self.image) self.assertIn('glue', self.store_api.acls) acls = self.store_api.acls['glue'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2], acls['read']) def test_member_addition_updates_acls(self): self.image_stub.locations = [{'url': 'glug', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' membership = glance.domain.ImageMembership( UUID1, TENANT3, None, None, status='accepted') self.image_member_repo.add(membership) self.assertIn('glug', self.store_api.acls) acls = self.store_api.acls['glug'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT1, TENANT2, TENANT3], acls['read']) def 
test_member_removal_updates_acls(self): self.image_stub.locations = [{'url': 'glug', 'metadata': {}, 'status': 'active'}] self.image_stub.visibility = 'private' membership = glance.domain.ImageMembership( UUID1, TENANT1, None, None, status='accepted') self.image_member_repo.remove(membership) self.assertIn('glug', self.store_api.acls) acls = self.store_api.acls['glug'] self.assertFalse(acls['public']) self.assertEqual([], acls['write']) self.assertEqual([TENANT2], acls['read']) class TestImageFactory(unit_test_base.StoreClearingUnitTest): def setUp(self): super(TestImageFactory, self).setUp() store_api = unit_test_utils.FakeStoreAPI() store_utils = unit_test_utils.FakeStoreUtils(store_api) self.image_factory = glance.location.ImageFactoryProxy( ImageFactoryStub(), glance.context.RequestContext(user=USER1), store_api, store_utils) def test_new_image(self): image = self.image_factory.new_image() self.assertIsNone(image.image_id) self.assertIsNone(image.status) self.assertEqual('private', image.visibility) self.assertEqual([], image.locations) def test_new_image_with_location(self): locations = [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}}] image = self.image_factory.new_image(locations=locations) self.assertEqual(locations, image.locations) location_bad = {'url': 'unknown://location', 'metadata': {}} self.assertRaises(exception.BadStoreUri, self.image_factory.new_image, locations=[location_bad]) class TestStoreMetaDataChecker(utils.BaseTestCase): def test_empty(self): glance_store.check_location_metadata({}) def test_unicode(self): m = {'key': u'somevalue'} glance_store.check_location_metadata(m) def test_unicode_list(self): m = {'key': [u'somevalue', u'2']} glance_store.check_location_metadata(m) def test_unicode_dict(self): inner = {'key1': u'somevalue', 'key2': u'somevalue'} m = {'topkey': inner} glance_store.check_location_metadata(m) def test_unicode_dict_list(self): inner = {'key1': u'somevalue', 'key2': u'somevalue'} m = {'topkey': inner, 'list': 
[u'somevalue', u'2'], 'u': u'2'} glance_store.check_location_metadata(m) def test_nested_dict(self): inner = {'key1': u'somevalue', 'key2': u'somevalue'} inner = {'newkey': inner} inner = {'anotherkey': inner} m = {'topkey': inner} glance_store.check_location_metadata(m) def test_simple_bad(self): m = {'key1': object()} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) def test_list_bad(self): m = {'key1': [u'somevalue', object()]} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) def test_nested_dict_bad(self): inner = {'key1': u'somevalue', 'key2': object()} inner = {'newkey': inner} inner = {'anotherkey': inner} m = {'topkey': inner} self.assertRaises(glance_store.BackendException, glance_store.check_location_metadata, m) glance-12.0.0/glance/tests/unit/test_jsonpatchmixin.py0000664000567000056710000000616512701407047024217 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance.common.exception as exc import glance.common.jsonpatchvalidator as jpv import glance.tests.utils as utils class TestValidator(jpv.JsonPatchValidatorMixin): def __init__(self, methods_allowed=None): if methods_allowed is None: methods_allowed = ["replace", "add"] super(TestValidator, self).__init__(methods_allowed) class TestJsonPatchMixin(utils.BaseTestCase): def test_body_validation(self): validator = TestValidator() validator.validate_body( [{"op": "replace", "path": "/param", "value": "ok"}]) # invalid if not a list of [{"op": "", "path": "", "value": ""}] # is passed self.assertRaises(exc.JsonPatchException, validator.validate_body, {"op": "replace", "path": "/me", "value": "should be a list"}) def test_value_validation(self): # a string, a list and a dict are valid value types validator = TestValidator() validator.validate_body( [{"op": "replace", "path": "/param", "value": "ok string"}]) validator.validate_body( [{"op": "replace", "path": "/param", "value": ["ok list", "really ok"]}]) validator.validate_body( [{"op": "replace", "path": "/param", "value": {"ok": "dict"}}]) def test_op_validation(self): validator = TestValidator(methods_allowed=["replace", "add", "copy"]) validator.validate_body( [{"op": "copy", "path": "/param", "value": "ok"}, {"op": "replace", "path": "/param/1", "value": "ok"}]) self.assertRaises( exc.JsonPatchException, validator.validate_body, [{"op": "test", "path": "/param", "value": "not allowed"}]) self.assertRaises(exc.JsonPatchException, validator.validate_body, [{"op": "nosuchmethodatall", "path": "/param", "value": "no way"}]) def test_path_validation(self): validator = TestValidator() bad_body_part = {"op": "add", "value": "bad path"} for bad_path in ["/param/", "param", "//param", "/param~2", "/param~"]: bad_body_part["path"] = bad_path bad_body = [bad_body_part] self.assertRaises(exc.JsonPatchException, validator.validate_body, bad_body) ok_body = [{"op": "add", "value": "some value", "path": "/param~1/param~0"}] 
body = validator.validate_body(ok_body)[0] self.assertEqual("param//param~", body["path"]) glance-12.0.0/glance/tests/unit/test_store_glare.py0000664000567000056710000000575012701407047023466 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import datetime from glance.common.glare import definitions import glance.context from glance.glare.domain import proxy from glance.glare import location from glance.tests.unit import utils as unit_test_utils from glance.tests import utils BASE_URI = 'http://storeurl.com/container' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '228c6da5-29cd-4d67-9457-ed632e083fc0' class ArtifactStub(definitions.ArtifactType): file = definitions.BinaryObject() file_list = definitions.BinaryObjectList() class TestStoreArtifact(utils.BaseTestCase): def setUp(self): self.store_api = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(self.store_api) ts = datetime.now() self.artifact_stub = ArtifactStub(id=UUID2, state='creating', created_at=ts, updated_at=ts, version='1.0', owner='me', name='foo') super(TestStoreArtifact, self).setUp() def test_set_blob_data(self): context = glance.context.RequestContext(user=USER1) helper = 
proxy.ArtifactHelper(location.ArtifactProxy, proxy_kwargs={ 'context': context, 'store_api': self.store_api, 'store_utils': self.store_utils }) artifact = helper.proxy(self.artifact_stub) artifact.file = ('YYYY', 4) self.assertEqual(4, artifact.file.size) def test_set_bloblist_data(self): context = glance.context.RequestContext(user=USER1) helper = proxy.ArtifactHelper(location.ArtifactProxy, proxy_kwargs={ 'context': context, 'store_api': self.store_api, 'store_utils': self.store_utils }) artifact = helper.proxy(self.artifact_stub) artifact.file_list.append(('YYYY', 4)) self.assertEqual(4, artifact.file_list[0].size) glance-12.0.0/glance/tests/unit/test_manage.py0000664000567000056710000003254612701407047022413 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures import mock from oslo_db.sqlalchemy import migration from six.moves import StringIO from glance.cmd import manage from glance.db import migration as db_migration from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import metadata as db_metadata from glance.tests import utils as test_utils class TestManageBase(test_utils.BaseTestCase): def setUp(self): super(TestManageBase, self).setUp() def clear_conf(): manage.CONF.reset() manage.CONF.unregister_opt(manage.command_opt) clear_conf() self.addCleanup(clear_conf) self.useFixture(fixtures.MonkeyPatch( 'oslo_log.log.setup', lambda product_name, version='test': None)) patcher = mock.patch('glance.db.sqlalchemy.api.get_engine') patcher.start() self.addCleanup(patcher.stop) def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs): self.useFixture(fixtures.MonkeyPatch('sys.argv', argv)) manage.main() func_name.assert_called_once_with(*exp_args, **exp_kwargs) class TestLegacyManage(TestManageBase): @mock.patch.object(migration, 'db_version') def test_legacy_db_version(self, db_version): with mock.patch('sys.stdout', new_callable=StringIO): self._main_test_helper(['glance.cmd.manage', 'db_version'], migration.db_version, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, 0) @mock.patch.object(migration, 'db_sync') def test_legacy_db_sync(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_sync'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) @mock.patch.object(migration, 'db_sync') def test_legacy_db_upgrade(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_upgrade'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) @mock.patch.object(migration, 'db_version_control') def test_legacy_db_version_control(self, db_version_control): self._main_test_helper(['glance.cmd.manage', 'db_version_control'], migration.db_version_control, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) 
@mock.patch.object(migration, 'db_sync') def test_legacy_db_sync_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_sync', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') @mock.patch.object(migration, 'db_sync') def test_legacy_db_upgrade_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_upgrade', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') @mock.patch.object(migration, 'db_sync') def test_legacy_db_downgrade_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db_downgrade', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') def test_db_metadefs_unload(self): db_metadata.db_unload_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_unload_metadefs'], db_metadata.db_unload_metadefs, db_api.get_engine()) def test_db_metadefs_load(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs'], db_metadata.db_load_metadefs, db_api.get_engine(), None, None, None, None) def test_db_metadefs_load_with_specified_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', None, None, None) def test_db_metadefs_load_from_path_merge(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', None, None) def test_db_metadefs_load_from_merge_and_prefer_new(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', 'True', None) def test_db_metadefs_load_from_merge_and_prefer_new_and_overwrite(self): 
db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_load_metadefs', '/mock/', 'True', 'True', 'True'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', 'True', 'True', 'True') def test_db_metadefs_export(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs'], db_metadata.db_export_metadefs, db_api.get_engine(), None) def test_db_metadefs_export_with_specified_path(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db_export_metadefs', '/mock/'], db_metadata.db_export_metadefs, db_api.get_engine(), '/mock/') class TestManage(TestManageBase): @mock.patch.object(migration, 'db_version') def test_db_version(self, db_version): with mock.patch('sys.stdout', new_callable=StringIO): self._main_test_helper(['glance.cmd.manage', 'db', 'version'], migration.db_version, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, 0) @mock.patch.object(migration, 'db_sync') def test_db_sync(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db', 'sync'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) @mock.patch.object(migration, 'db_sync') def test_db_upgrade(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) @mock.patch.object(migration, 'db_version_control') def test_db_version_control(self, db_version_control): self._main_test_helper(['glance.cmd.manage', 'db', 'version_control'], migration.db_version_control, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, None) @mock.patch.object(migration, 'db_sync') def test_db_sync_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db', 'sync', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') @mock.patch.object(migration, 'db_sync') def test_db_upgrade_version(self, db_sync): 
self._main_test_helper(['glance.cmd.manage', 'db', 'upgrade', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') @mock.patch.object(migration, 'db_sync') def test_db_downgrade_version(self, db_sync): self._main_test_helper(['glance.cmd.manage', 'db', 'downgrade', '20'], migration.db_sync, db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, '20') def test_db_metadefs_unload(self): db_metadata.db_unload_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'unload_metadefs'], db_metadata.db_unload_metadefs, db_api.get_engine()) def test_db_metadefs_load(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs'], db_metadata.db_load_metadefs, db_api.get_engine(), None, False, False, False) def test_db_metadefs_load_with_specified_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', False, False, False) def test_db_metadefs_load_prefer_new_with_path(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/', '--merge', '--prefer_new'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', True, True, False) def test_db_metadefs_load_prefer_new(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--prefer_new'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, True, False) def test_db_metadefs_load_overwrite_existing(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, False, True) def test_db_metadefs_load_prefer_new_and_overwrite_existing(self): db_metadata.db_load_metadefs = 
mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--merge', '--prefer_new', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), None, True, True, True) def test_db_metadefs_load_from_path_overwrite_existing(self): db_metadata.db_load_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'load_metadefs', '--path', '/mock/', '--merge', '--overwrite'], db_metadata.db_load_metadefs, db_api.get_engine(), '/mock/', True, False, True) def test_db_metadefs_export(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs'], db_metadata.db_export_metadefs, db_api.get_engine(), None) def test_db_metadefs_export_with_specified_path(self): db_metadata.db_export_metadefs = mock.Mock() self._main_test_helper(['glance.cmd.manage', 'db', 'export_metadefs', '--path', '/mock/'], db_metadata.db_export_metadefs, db_api.get_engine(), '/mock/') glance-12.0.0/glance/tests/unit/test_policy.py0000664000567000056710000005361512701407047022462 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os.path import mock import oslo_config.cfg import glance.api.policy from glance.common import exception import glance.context from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils from glance.tests import utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' class ImageRepoStub(object): def get(self, *args, **kwargs): return 'image_from_get' def save(self, *args, **kwargs): return 'image_from_save' def add(self, *args, **kwargs): return 'image_from_add' def list(self, *args, **kwargs): return ['image_from_list_0', 'image_from_list_1'] class ImageStub(object): def __init__(self, image_id=None, visibility='private', container_format='bear', disk_format='raw', status='active', extra_properties=None): if extra_properties is None: extra_properties = {} self.image_id = image_id self.visibility = visibility self.container_format = container_format self.disk_format = disk_format self.status = status self.extra_properties = extra_properties def delete(self): self.status = 'deleted' class ImageFactoryStub(object): def new_image(self, image_id=None, name=None, visibility='private', min_disk=0, min_ram=0, protected=False, owner=None, disk_format=None, container_format=None, extra_properties=None, tags=None, **other_args): self.visibility = visibility return 'new_image' class MemberRepoStub(object): image = None def add(self, image_member): image_member.output = 'member_repo_add' def get(self, *args, **kwargs): return 'member_repo_get' def save(self, image_member, from_state=None): image_member.output = 'member_repo_save' def list(self, *args, **kwargs): return 'member_repo_list' def remove(self, image_member): image_member.output = 'member_repo_remove' class ImageMembershipStub(object): def __init__(self, output=None): self.output = output class TaskRepoStub(object): def get(self, *args, **kwargs): return 'task_from_get' def add(self, *args, **kwargs): return 'task_from_add' def list(self, *args, **kwargs): return 
['task_from_list_0', 'task_from_list_1'] class TaskStub(object): def __init__(self, task_id): self.task_id = task_id self.status = 'pending' def run(self, executor): self.status = 'processing' class TaskFactoryStub(object): def new_task(self, *args): return 'new_task' class TestPolicyEnforcer(base.IsolatedUnitTest): def test_policy_file_default_rules_default_location(self): enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) enforcer.enforce(context, 'get_image', {}) def test_policy_file_custom_rules_default_location(self): rules = {"get_image": '!'} self.set_policy_rules(rules) enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) self.assertRaises(exception.Forbidden, enforcer.enforce, context, 'get_image', {}) def test_policy_file_custom_location(self): self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), group='oslo_policy') rules = {"get_image": '!'} self.set_policy_rules(rules) enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) self.assertRaises(exception.Forbidden, enforcer.enforce, context, 'get_image', {}) def test_policy_file_check(self): self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), group='oslo_policy') rules = {"get_image": '!'} self.set_policy_rules(rules) enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) self.assertEqual(False, enforcer.check(context, 'get_image', {})) class TestPolicyEnforcerNoFile(base.IsolatedUnitTest): def test_policy_file_specified_but_not_found(self): """Missing defined policy file should result in a default ruleset""" self.config(policy_file='gobble.gobble', group='oslo_policy') enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) enforcer.enforce(context, 'get_image', {}) self.assertRaises(exception.Forbidden, enforcer.enforce, context, 'manage_image_cache', {}) admin_context = 
glance.context.RequestContext(roles=['admin']) enforcer.enforce(admin_context, 'manage_image_cache', {}) def test_policy_file_default_not_found(self): """Missing default policy file should result in a default ruleset""" def fake_find_file(self, name): return None self.stubs.Set(oslo_config.cfg.ConfigOpts, 'find_file', fake_find_file) enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[]) enforcer.enforce(context, 'get_image', {}) self.assertRaises(exception.Forbidden, enforcer.enforce, context, 'manage_image_cache', {}) admin_context = glance.context.RequestContext(roles=['admin']) enforcer.enforce(admin_context, 'manage_image_cache', {}) class TestImagePolicy(test_utils.BaseTestCase): def setUp(self): self.image_stub = ImageStub(UUID1) self.image_repo_stub = ImageRepoStub() self.image_factory_stub = ImageFactoryStub() self.policy = mock.Mock() self.policy.enforce = mock.Mock() super(TestImagePolicy, self).setUp() def test_publicize_image_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.assertRaises(exception.Forbidden, setattr, image, 'visibility', 'public') self.assertEqual('private', image.visibility) self.policy.enforce.assert_called_once_with({}, "publicize_image", image.target) def test_publicize_image_allowed(self): image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) image.visibility = 'public' self.assertEqual('public', image.visibility) self.policy.enforce.assert_called_once_with({}, "publicize_image", image.target) def test_delete_image_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image.delete) self.assertEqual('active', image.status) self.policy.enforce.assert_called_once_with({}, "delete_image", image.target) def test_delete_image_allowed(self): image = 
glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) image.delete() self.assertEqual('deleted', image.status) self.policy.enforce.assert_called_once_with({}, "delete_image", image.target) def test_get_image_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image_target = mock.Mock() with mock.patch.object(glance.api.policy, 'ImageTarget') as target: target.return_value = image_target image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image_repo.get, UUID1) self.policy.enforce.assert_called_once_with({}, "get_image", image_target) def test_get_image_allowed(self): image_target = mock.Mock() with mock.patch.object(glance.api.policy, 'ImageTarget') as target: target.return_value = image_target image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) output = image_repo.get(UUID1) self.assertIsInstance(output, glance.api.policy.ImageProxy) self.assertEqual('image_from_get', output.image) self.policy.enforce.assert_called_once_with({}, "get_image", image_target) def test_get_images_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image_repo.list) self.policy.enforce.assert_called_once_with({}, "get_images", {}) def test_get_images_allowed(self): image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) images = image_repo.list() for i, image in enumerate(images): self.assertIsInstance(image, glance.api.policy.ImageProxy) self.assertEqual('image_from_list_%d' % i, image.image) self.policy.enforce.assert_called_once_with({}, "get_images", {}) def test_modify_image_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) image = 
glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image_repo.save, image) self.policy.enforce.assert_called_once_with({}, "modify_image", image.target) def test_modify_image_allowed(self): image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) image_repo.save(image) self.policy.enforce.assert_called_once_with({}, "modify_image", image.target) def test_add_image_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image_repo.add, image) self.policy.enforce.assert_called_once_with({}, "add_image", image.target) def test_add_image_allowed(self): image_repo = glance.api.policy.ImageRepoProxy(self.image_repo_stub, {}, self.policy) image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) image_repo.add(image) self.policy.enforce.assert_called_once_with({}, "add_image", image.target) def test_new_image_visibility(self): self.policy.enforce.side_effect = exception.Forbidden image_factory = glance.api.policy.ImageFactoryProxy( self.image_factory_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image_factory.new_image, visibility='public') self.policy.enforce.assert_called_once_with({}, "publicize_image", {}) def test_new_image_visibility_public_allowed(self): image_factory = glance.api.policy.ImageFactoryProxy( self.image_factory_stub, {}, self.policy) image_factory.new_image(visibility='public') self.policy.enforce.assert_called_once_with({}, "publicize_image", {}) def test_image_get_data_policy_enforced_with_target(self): extra_properties = { 'test_key': 'test_4321' } image_stub = ImageStub(UUID1, extra_properties=extra_properties) with mock.patch('glance.api.policy.ImageTarget'): 
image = glance.api.policy.ImageProxy(image_stub, {}, self.policy) target = image.target self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, image.get_data) self.policy.enforce.assert_called_once_with({}, "download_image", target) def test_image_set_data(self): self.policy.enforce.side_effect = exception.Forbidden image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.assertRaises(exception.Forbidden, image.set_data) self.policy.enforce.assert_called_once_with({}, "upload_image", image.target) class TestMemberPolicy(test_utils.BaseTestCase): def setUp(self): self.policy = mock.Mock() self.policy.enforce = mock.Mock() self.image_stub = ImageStub(UUID1) image = glance.api.policy.ImageProxy(self.image_stub, {}, self.policy) self.member_repo = glance.api.policy.ImageMemberRepoProxy( MemberRepoStub(), image, {}, self.policy) self.target = self.member_repo.target super(TestMemberPolicy, self).setUp() def test_add_member_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, self.member_repo.add, '') self.policy.enforce.assert_called_once_with({}, "add_member", self.target) def test_add_member_allowed(self): image_member = ImageMembershipStub() self.member_repo.add(image_member) self.assertEqual('member_repo_add', image_member.output) self.policy.enforce.assert_called_once_with({}, "add_member", self.target) def test_get_member_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, self.member_repo.get, '') self.policy.enforce.assert_called_once_with({}, "get_member", self.target) def test_get_member_allowed(self): output = self.member_repo.get('') self.assertEqual('member_repo_get', output) self.policy.enforce.assert_called_once_with({}, "get_member", self.target) def test_modify_member_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, 
self.member_repo.save, '') self.policy.enforce.assert_called_once_with({}, "modify_member", self.target) def test_modify_member_allowed(self): image_member = ImageMembershipStub() self.member_repo.save(image_member) self.assertEqual('member_repo_save', image_member.output) self.policy.enforce.assert_called_once_with({}, "modify_member", self.target) def test_get_members_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, self.member_repo.list, '') self.policy.enforce.assert_called_once_with({}, "get_members", self.target) def test_get_members_allowed(self): output = self.member_repo.list('') self.assertEqual('member_repo_list', output) self.policy.enforce.assert_called_once_with({}, "get_members", self.target) def test_delete_member_not_allowed(self): self.policy.enforce.side_effect = exception.Forbidden self.assertRaises(exception.Forbidden, self.member_repo.remove, '') self.policy.enforce.assert_called_once_with({}, "delete_member", self.target) def test_delete_member_allowed(self): image_member = ImageMembershipStub() self.member_repo.remove(image_member) self.assertEqual('member_repo_remove', image_member.output) self.policy.enforce.assert_called_once_with({}, "delete_member", self.target) class TestTaskPolicy(test_utils.BaseTestCase): def setUp(self): self.task_stub = TaskStub(UUID1) self.task_repo_stub = TaskRepoStub() self.task_factory_stub = TaskFactoryStub() self.policy = unit_test_utils.FakePolicyEnforcer() super(TestTaskPolicy, self).setUp() def test_get_task_not_allowed(self): rules = {"get_task": False} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskRepoProxy( self.task_repo_stub, {}, self.policy ) self.assertRaises(exception.Forbidden, task_repo.get, UUID1) def test_get_task_allowed(self): rules = {"get_task": True} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskRepoProxy( self.task_repo_stub, {}, self.policy ) task = task_repo.get(UUID1) 
self.assertIsInstance(task, glance.api.policy.TaskProxy) self.assertEqual('task_from_get', task.task) def test_get_tasks_not_allowed(self): rules = {"get_tasks": False} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskStubRepoProxy( self.task_repo_stub, {}, self.policy ) self.assertRaises(exception.Forbidden, task_repo.list) def test_get_tasks_allowed(self): rules = {"get_task": True} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskStubRepoProxy( self.task_repo_stub, {}, self.policy ) tasks = task_repo.list() for i, task in enumerate(tasks): self.assertIsInstance(task, glance.api.policy.TaskStubProxy) self.assertEqual('task_from_list_%d' % i, task.task_stub) def test_add_task_not_allowed(self): rules = {"add_task": False} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskRepoProxy( self.task_repo_stub, {}, self.policy ) task = glance.api.policy.TaskProxy(self.task_stub, {}, self.policy) self.assertRaises(exception.Forbidden, task_repo.add, task) def test_add_task_allowed(self): rules = {"add_task": True} self.policy.set_rules(rules) task_repo = glance.api.policy.TaskRepoProxy( self.task_repo_stub, {}, self.policy ) task = glance.api.policy.TaskProxy(self.task_stub, {}, self.policy) task_repo.add(task) class TestContextPolicyEnforcer(base.IsolatedUnitTest): def _do_test_policy_influence_context_admin(self, policy_admin_role, context_role, context_is_admin, admin_expected): self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'), group='oslo_policy') rules = {'context_is_admin': 'role:%s' % policy_admin_role} self.set_policy_rules(rules) enforcer = glance.api.policy.Enforcer() context = glance.context.RequestContext(roles=[context_role], is_admin=context_is_admin, policy_enforcer=enforcer) self.assertEqual(admin_expected, context.is_admin) def test_context_admin_policy_admin(self): self._do_test_policy_influence_context_admin('test_admin', 'test_admin', True, True) def test_context_nonadmin_policy_admin(self): 
self._do_test_policy_influence_context_admin('test_admin', 'test_admin', False, True) def test_context_admin_policy_nonadmin(self): self._do_test_policy_influence_context_admin('test_admin', 'demo', True, True) def test_context_nonadmin_policy_nonadmin(self): self._do_test_policy_influence_context_admin('test_admin', 'demo', False, False) glance-12.0.0/glance/tests/unit/fake_rados.py0000664000567000056710000000611612701407047022214 0ustar jenkinsjenkins00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class mock_rados(object): class ioctx(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def close(self, *args, **kwargs): pass class Rados(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def connect(self, *args, **kwargs): pass def open_ioctx(self, *args, **kwargs): return mock_rados.ioctx() def shutdown(self, *args, **kwargs): pass class mock_rbd(object): class ImageExists(Exception): pass class ImageBusy(Exception): pass class ImageNotFound(Exception): pass class Image(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): pass def create_snap(self, *args, **kwargs): pass def remove_snap(self, *args, **kwargs): pass def protect_snap(self, *args, **kwargs): pass def unprotect_snap(self, *args, **kwargs): pass def read(self, *args, **kwargs): raise NotImplementedError() def write(self, *args, **kwargs): raise NotImplementedError() def resize(self, *args, **kwargs): raise NotImplementedError() def discard(self, offset, length): raise NotImplementedError() def close(self): pass def list_snaps(self): raise NotImplementedError() def parent_info(self): raise NotImplementedError() def size(self): raise NotImplementedError() class RBD(object): def __init__(self, *args, **kwargs): pass def __enter__(self, *args, **kwargs): return self def __exit__(self, *args, **kwargs): return False def create(self, *args, **kwargs): pass def remove(self, *args, **kwargs): pass def list(self, *args, **kwargs): raise NotImplementedError() def clone(self, *args, **kwargs): raise NotImplementedError() glance-12.0.0/glance/tests/unit/test_domain_proxy.py0000664000567000056710000002464112701407047023670 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # Copyright 2013 IBM Corp. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.domain import proxy import glance.tests.utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class FakeProxy(object): def __init__(self, base, *args, **kwargs): self.base = base self.args = args self.kwargs = kwargs class FakeRepo(object): def __init__(self, result=None): self.args = None self.kwargs = None self.result = result def fake_method(self, *args, **kwargs): self.args = args self.kwargs = kwargs return self.result get = fake_method list = fake_method add = fake_method save = fake_method remove = fake_method class TestProxyRepoPlain(test_utils.BaseTestCase): def setUp(self): super(TestProxyRepoPlain, self).setUp() self.fake_repo = FakeRepo() self.proxy_repo = proxy.Repo(self.fake_repo) def _test_method(self, name, base_result, *args, **kwargs): self.fake_repo.result = base_result method = getattr(self.proxy_repo, name) proxy_result = method(*args, **kwargs) self.assertEqual(base_result, proxy_result) self.assertEqual(args, self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) def test_get(self): self._test_method('get', 'snarf', 'abcd') def test_list(self): self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn') def test_add(self): self._test_method('add', 'snuff', 'enough') def test_save(self): 
self._test_method('save', 'snuff', 'enough', from_state=None) def test_remove(self): self._test_method('add', None, 'flying') class TestProxyRepoWrapping(test_utils.BaseTestCase): def setUp(self): super(TestProxyRepoWrapping, self).setUp() self.fake_repo = FakeRepo() self.proxy_repo = proxy.Repo(self.fake_repo, item_proxy_class=FakeProxy, item_proxy_kwargs={'a': 1}) def _test_method(self, name, base_result, *args, **kwargs): self.fake_repo.result = base_result method = getattr(self.proxy_repo, name) proxy_result = method(*args, **kwargs) self.assertIsInstance(proxy_result, FakeProxy) self.assertEqual(base_result, proxy_result.base) self.assertEqual(0, len(proxy_result.args)) self.assertEqual({'a': 1}, proxy_result.kwargs) self.assertEqual(args, self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) def test_get(self): self.fake_repo.result = 'snarf' result = self.proxy_repo.get('some-id') self.assertIsInstance(result, FakeProxy) self.assertEqual(('some-id',), self.fake_repo.args) self.assertEqual({}, self.fake_repo.kwargs) self.assertEqual('snarf', result.base) self.assertEqual(tuple(), result.args) self.assertEqual({'a': 1}, result.kwargs) def test_list(self): self.fake_repo.result = ['scratch', 'sniff'] results = self.proxy_repo.list(2, prefix='s') self.assertEqual((2,), self.fake_repo.args) self.assertEqual({'prefix': 's'}, self.fake_repo.kwargs) self.assertEqual(2, len(results)) for i in range(2): self.assertIsInstance(results[i], FakeProxy) self.assertEqual(self.fake_repo.result[i], results[i].base) self.assertEqual(tuple(), results[i].args) self.assertEqual({'a': 1}, results[i].kwargs) def _test_method_with_proxied_argument(self, name, result, **kwargs): self.fake_repo.result = result item = FakeProxy('snoop') method = getattr(self.proxy_repo, name) proxy_result = method(item) self.assertEqual(('snoop',), self.fake_repo.args) self.assertEqual(kwargs, self.fake_repo.kwargs) if result is None: self.assertIsNone(proxy_result) else: 
self.assertIsInstance(proxy_result, FakeProxy) self.assertEqual(result, proxy_result.base) self.assertEqual(tuple(), proxy_result.args) self.assertEqual({'a': 1}, proxy_result.kwargs) def test_add(self): self._test_method_with_proxied_argument('add', 'dog') def test_add_with_no_result(self): self._test_method_with_proxied_argument('add', None) def test_save(self): self._test_method_with_proxied_argument('save', 'dog', from_state=None) def test_save_with_no_result(self): self._test_method_with_proxied_argument('save', None, from_state=None) def test_remove(self): self._test_method_with_proxied_argument('remove', 'dog') def test_remove_with_no_result(self): self._test_method_with_proxied_argument('remove', None) class FakeImageFactory(object): def __init__(self, result=None): self.result = None self.kwargs = None def new_image(self, **kwargs): self.kwargs = kwargs return self.result class TestImageFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageFactory, self).setUp() self.factory = FakeImageFactory() def test_proxy_plain(self): proxy_factory = proxy.ImageFactory(self.factory) self.factory.result = 'eddard' image = proxy_factory.new_image(a=1, b='two') self.assertEqual('eddard', image) self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs) def test_proxy_wrapping(self): proxy_factory = proxy.ImageFactory(self.factory, proxy_class=FakeProxy, proxy_kwargs={'dog': 'bark'}) self.factory.result = 'stark' image = proxy_factory.new_image(a=1, b='two') self.assertIsInstance(image, FakeProxy) self.assertEqual('stark', image.base) self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs) class FakeImageMembershipFactory(object): def __init__(self, result=None): self.result = None self.image = None self.member_id = None def new_image_member(self, image, member_id): self.image = image self.member_id = member_id return self.result class TestImageMembershipFactory(test_utils.BaseTestCase): def setUp(self): super(TestImageMembershipFactory, self).setUp() 
self.factory = FakeImageMembershipFactory() def test_proxy_plain(self): proxy_factory = proxy.ImageMembershipFactory(self.factory) self.factory.result = 'tyrion' membership = proxy_factory.new_image_member('jaime', 'cersei') self.assertEqual('tyrion', membership) self.assertEqual('jaime', self.factory.image) self.assertEqual('cersei', self.factory.member_id) def test_proxy_wrapped_membership(self): proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy, proxy_kwargs={'a': 1}) self.factory.result = 'tyrion' membership = proxy_factory.new_image_member('jaime', 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertEqual('tyrion', membership.base) self.assertEqual({'a': 1}, membership.kwargs) self.assertEqual('jaime', self.factory.image) self.assertEqual('cersei', self.factory.member_id) def test_proxy_wrapped_image(self): proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy) self.factory.result = 'tyrion' image = FakeProxy('jaime') membership = proxy_factory.new_image_member(image, 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertIsInstance(self.factory.image, FakeProxy) self.assertEqual('cersei', self.factory.member_id) def test_proxy_both_wrapped(self): class FakeProxy2(FakeProxy): pass proxy_factory = proxy.ImageMembershipFactory( self.factory, proxy_class=FakeProxy, proxy_kwargs={'b': 2}) self.factory.result = 'tyrion' image = FakeProxy2('jaime') membership = proxy_factory.new_image_member(image, 'cersei') self.assertIsInstance(membership, FakeProxy) self.assertEqual('tyrion', membership.base) self.assertEqual({'b': 2}, membership.kwargs) self.assertIsInstance(self.factory.image, FakeProxy2) self.assertEqual('cersei', self.factory.member_id) class FakeImage(object): def __init__(self, result=None): self.result = result class TestTaskFactory(test_utils.BaseTestCase): def setUp(self): super(TestTaskFactory, self).setUp() self.factory = mock.Mock() self.fake_type = 'import' 
self.fake_owner = "owner" def test_proxy_plain(self): proxy_factory = proxy.TaskFactory(self.factory) proxy_factory.new_task( type=self.fake_type, owner=self.fake_owner ) self.factory.new_task.assert_called_once_with( type=self.fake_type, owner=self.fake_owner ) def test_proxy_wrapping(self): proxy_factory = proxy.TaskFactory( self.factory, task_proxy_class=FakeProxy, task_proxy_kwargs={'dog': 'bark'}) self.factory.new_task.return_value = 'fake_task' task = proxy_factory.new_task( type=self.fake_type, owner=self.fake_owner ) self.factory.new_task.assert_called_once_with( type=self.fake_type, owner=self.fake_owner ) self.assertIsInstance(task, FakeProxy) self.assertEqual('fake_task', task.base) glance-12.0.0/glance/tests/unit/test_quota.py0000664000567000056710000007154312701407047022314 0ustar jenkinsjenkins00000000000000# Copyright 2013, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from mock import patch from oslo_utils import encodeutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.common import exception from glance.common import store_utils import glance.quota from glance.tests.unit import utils as unit_test_utils from glance.tests import utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' class FakeContext(object): owner = 'someone' is_admin = False class FakeImage(object): size = None image_id = 'someid' locations = [{'url': 'file:///not/a/path', 'metadata': {}}] tags = set([]) def set_data(self, data, size=None): self.size = 0 for d in data: self.size += len(d) def __init__(self, **kwargs): self.extra_properties = kwargs.get('extra_properties', {}) class TestImageQuota(test_utils.BaseTestCase): def setUp(self): super(TestImageQuota, self).setUp() def tearDown(self): super(TestImageQuota, self).tearDown() def _get_image(self, location_count=1, image_size=10): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'xyz' base_image.size = image_size image = glance.quota.ImageProxy(base_image, context, db_api, store) locations = [] for i in range(location_count): locations.append({'url': 'file:///g/there/it/is%d' % i, 'metadata': {}, 'status': 'active'}) image_values = {'id': 'xyz', 'owner': context.owner, 'status': 'active', 'size': image_size, 'locations': locations} db_api.image_create(context, image_values) return image def test_quota_allowed(self): quota = 10 self.config(user_storage_quota=str(quota)) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = glance.quota.ImageProxy(base_image, context, db_api, 
store) data = '*' * quota base_image.set_data(data, size=None) image.set_data(data) self.assertEqual(quota, base_image.size) def _test_quota_allowed_unit(self, data_length, config_quota): self.config(user_storage_quota=config_quota) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = glance.quota.ImageProxy(base_image, context, db_api, store) data = '*' * data_length base_image.set_data(data, size=None) image.set_data(data) self.assertEqual(data_length, base_image.size) def test_quota_allowed_unit_b(self): self._test_quota_allowed_unit(10, '10B') def test_quota_allowed_unit_kb(self): self._test_quota_allowed_unit(10, '1KB') def test_quota_allowed_unit_mb(self): self._test_quota_allowed_unit(10, '1MB') def test_quota_allowed_unit_gb(self): self._test_quota_allowed_unit(10, '1GB') def test_quota_allowed_unit_tb(self): self._test_quota_allowed_unit(10, '1TB') def _quota_exceeded_size(self, quota, data, deleted=True, size=None): self.config(user_storage_quota=quota) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = glance.quota.ImageProxy(base_image, context, db_api, store) if deleted: with patch.object(store_utils, 'safe_delete_from_backend'): store_utils.safe_delete_from_backend( context, image.image_id, base_image.locations[0]) self.assertRaises(exception.StorageQuotaFull, image.set_data, data, size=size) def test_quota_exceeded_no_size(self): quota = 10 data = '*' * (quota + 1) # NOTE(jbresnah) When the image size is None it means that it is # not known. In this case the only time we will raise an # exception is when there is no room left at all, thus we know # it will not fit. # That's why 'get_remaining_quota' is mocked with return_value = 0. 
with patch.object(glance.api.common, 'get_remaining_quota', return_value=0): self._quota_exceeded_size(str(quota), data) def test_quota_exceeded_with_right_size(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size(str(quota), data, size=len(data), deleted=False) def test_quota_exceeded_with_right_size_b(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size('10B', data, size=len(data), deleted=False) def test_quota_exceeded_with_right_size_kb(self): quota = units.Ki data = '*' * (quota + 1) self._quota_exceeded_size('1KB', data, size=len(data), deleted=False) def test_quota_exceeded_with_lie_size(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size(str(quota), data, deleted=False, size=quota - 1) def test_append_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.append(new_location) pre_add_locations.append(new_location) self.assertEqual(image.locations, pre_add_locations) def test_insert_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.insert(0, new_location) pre_add_locations.insert(0, new_location) self.assertEqual(image.locations, pre_add_locations) def test_extend_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.extend([new_location]) pre_add_locations.extend([new_location]) self.assertEqual(image.locations, pre_add_locations) def test_iadd_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations += [new_location] pre_add_locations += [new_location] self.assertEqual(image.locations, pre_add_locations) def test_set_location(self): 
new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() image.locations = [new_location] self.assertEqual(image.locations, [new_location]) def _make_image_with_quota(self, image_size=10, location_count=2): quota = image_size * location_count self.config(user_storage_quota=str(quota)) return self._get_image(image_size=image_size, location_count=location_count) def test_exceed_append_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.append, {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}) def test_exceed_insert_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.insert, 0, {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}) def test_exceed_extend_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.extend, [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}]) def test_set_location_under(self): image = self._make_image_with_quota(location_count=1) image.locations = [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}] def test_set_location_exceed(self): image = self._make_image_with_quota(location_count=1) try: image.locations = [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}, {'url': 'file:///a/path2', 'metadata': {}, 'status': 'active'}] self.fail('Should have raised the quota exception') except exception.StorageQuotaFull: pass def test_iadd_location_exceed(self): image = self._make_image_with_quota(location_count=1) try: image.locations += [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}] self.fail('Should have raised the quota exception') except exception.StorageQuotaFull: pass def test_append_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = 
unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = glance.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations.append({'url': 'file:///fake.img.tar.gz', 'metadata': {}}) self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) def test_insert_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = glance.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations.insert(0, {'url': 'file:///fake.img.tar.gz', 'metadata': {}}) self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) def test_set_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = glance.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}], image.locations) def test_iadd_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = glance.quota.ImageProxy(base_image, 
context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) class TestImagePropertyQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImagePropertyQuotas, self).setUp() self.base_image = FakeImage() self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_mock.add.return_value = self.base_image self.image_repo_mock.save.return_value = self.base_image self.image_repo_proxy = glance.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_save_image_with_image_property(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) def test_save_image_too_many_image_properties(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'} exc = self.assertRaises(exception.ImagePropertyLimitExceeded, self.image_repo_proxy.save, self.image) self.assertIn("Attempted: 2, Maximum: 1", encodeutils.exception_to_unicode(exc)) def test_save_image_unlimited_image_properties(self): self.config(image_property_quota=-1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) def test_add_image_with_image_property(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.add(self.image) self.image_repo_mock.add.assert_called_once_with(self.base_image) def test_add_image_too_many_image_properties(self): self.config(image_property_quota=1) self.image.extra_properties = 
{'foo': 'bar', 'foo2': 'bar2'} exc = self.assertRaises(exception.ImagePropertyLimitExceeded, self.image_repo_proxy.add, self.image) self.assertIn("Attempted: 2, Maximum: 1", encodeutils.exception_to_unicode(exc)) def test_add_image_unlimited_image_properties(self): self.config(image_property_quota=-1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.add(self.image) self.image_repo_mock.add.assert_called_once_with(self.base_image) def _quota_exceed_setup(self): self.config(image_property_quota=2) self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'} self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) def test_modify_image_properties_when_quota_exceeded(self): self._quota_exceed_setup() self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertEqual('frob', self.base_image.extra_properties['foo']) self.assertEqual('eggs', self.base_image.extra_properties['spam']) def test_delete_image_properties_when_quota_exceeded(self): self._quota_exceed_setup() self.config(image_property_quota=1) del self.image.extra_properties['foo'] self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertNotIn('foo', self.base_image.extra_properties) self.assertEqual('ham', self.base_image.extra_properties['spam']) def test_invalid_quota_config_parameter(self): self.config(user_storage_quota='foo') location = {"url": "file:///fake.img.tar.gz", "metadata": {}} self.assertRaises(exception.InvalidOptionValue, self.image.locations.append, location) def test_exceed_quota_during_patch_operation(self): self._quota_exceed_setup() self.image.extra_properties['frob'] = 'baz' self.image.extra_properties['lorem'] = 'ipsum' self.assertEqual('bar', self.base_image.extra_properties['foo']) 
self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertEqual('baz', self.base_image.extra_properties['frob']) self.assertEqual('ipsum', self.base_image.extra_properties['lorem']) del self.image.extra_properties['frob'] del self.image.extra_properties['lorem'] self.image_repo_proxy.save(self.image) call_args = mock.call(self.base_image, from_state=None) self.assertEqual(call_args, self.image_repo_mock.save.call_args) self.assertEqual('bar', self.base_image.extra_properties['foo']) self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertNotIn('frob', self.base_image.extra_properties) self.assertNotIn('lorem', self.base_image.extra_properties) def test_quota_exceeded_after_delete_image_properties(self): self.config(image_property_quota=3) self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham', 'frob': 'baz'} self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.config(image_property_quota=1) del self.image.extra_properties['foo'] self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertNotIn('foo', self.base_image.extra_properties) self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertEqual('baz', self.base_image.extra_properties['frob']) class TestImageTagQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageTagQuotas, self).setUp() self.base_image = mock.Mock() self.base_image.tags = set([]) self.base_image.extra_properties = {} self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_proxy = glance.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_replace_image_tag(self): self.config(image_tag_quota=1) self.image.tags = ['foo'] self.assertEqual(1, len(self.image.tags)) def test_replace_too_many_image_tags(self): 
self.config(image_tag_quota=0) exc = self.assertRaises(exception.ImageTagLimitExceeded, setattr, self.image, 'tags', ['foo', 'bar']) self.assertIn('Attempted: 2, Maximum: 0', encodeutils.exception_to_unicode(exc)) self.assertEqual(0, len(self.image.tags)) def test_replace_unlimited_image_tags(self): self.config(image_tag_quota=-1) self.image.tags = ['foo'] self.assertEqual(1, len(self.image.tags)) def test_add_image_tag(self): self.config(image_tag_quota=1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) def test_add_too_many_image_tags(self): self.config(image_tag_quota=1) self.image.tags.add('foo') exc = self.assertRaises(exception.ImageTagLimitExceeded, self.image.tags.add, 'bar') self.assertIn('Attempted: 2, Maximum: 1', encodeutils.exception_to_unicode(exc)) def test_add_unlimited_image_tags(self): self.config(image_tag_quota=-1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) def test_remove_image_tag_while_over_quota(self): self.config(image_tag_quota=1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) self.config(image_tag_quota=0) self.image.tags.remove('foo') self.assertEqual(0, len(self.image.tags)) class TestQuotaImageTagsProxy(test_utils.BaseTestCase): def setUp(self): super(TestQuotaImageTagsProxy, self).setUp() def test_add(self): proxy = glance.quota.QuotaImageTagsProxy(set([])) proxy.add('foo') self.assertIn('foo', proxy) def test_add_too_many_tags(self): self.config(image_tag_quota=0) proxy = glance.quota.QuotaImageTagsProxy(set([])) exc = self.assertRaises(exception.ImageTagLimitExceeded, proxy.add, 'bar') self.assertIn('Attempted: 1, Maximum: 0', encodeutils.exception_to_unicode(exc)) def test_equals(self): proxy = glance.quota.QuotaImageTagsProxy(set([])) self.assertEqual(set([]), proxy) def test_contains(self): proxy = glance.quota.QuotaImageTagsProxy(set(['foo'])) self.assertIn('foo', proxy) def test_len(self): proxy = glance.quota.QuotaImageTagsProxy(set(['foo', 'bar', 'baz', 
'niz'])) self.assertEqual(4, len(proxy)) def test_iter(self): items = set(['foo', 'bar', 'baz', 'niz']) proxy = glance.quota.QuotaImageTagsProxy(items.copy()) self.assertEqual(4, len(items)) for item in proxy: items.remove(item) self.assertEqual(0, len(items)) class TestImageMemberQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberQuotas, self).setUp() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) context = FakeContext() self.image = mock.Mock() self.base_image_member_factory = mock.Mock() self.image_member_factory = glance.quota.ImageMemberFactoryProxy( self.base_image_member_factory, context, db_api, store) def test_new_image_member(self): self.config(image_member_quota=1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_unlimited_members(self): self.config(image_member_quota=-1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_too_many_members(self): self.config(image_member_quota=0) self.assertRaises(exception.ImageMemberLimitExceeded, self.image_member_factory.new_image_member, self.image, 'fake_id') class TestImageLocationQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageLocationQuotas, self).setUp() self.base_image = mock.Mock() self.base_image.locations = [] self.base_image.size = 1 self.base_image.extra_properties = {} self.image = glance.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_proxy = glance.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_replace_image_location(self): self.config(image_location_quota=1) self.image.locations = 
[{"url": "file:///fake.img.tar.gz", "metadata": {} }] self.assertEqual(1, len(self.image.locations)) def test_replace_too_many_image_locations(self): self.config(image_location_quota=1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] locations = [ {"url": "file:///fake1.img.tar.gz", "metadata": {}}, {"url": "file:///fake2.img.tar.gz", "metadata": {}}, {"url": "file:///fake3.img.tar.gz", "metadata": {}} ] exc = self.assertRaises(exception.ImageLocationLimitExceeded, setattr, self.image, 'locations', locations) self.assertIn('Attempted: 3, Maximum: 1', encodeutils.exception_to_unicode(exc)) self.assertEqual(1, len(self.image.locations)) def test_replace_unlimited_image_locations(self): self.config(image_location_quota=-1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] self.assertEqual(1, len(self.image.locations)) def test_add_image_location(self): self.config(image_location_quota=1) location = {"url": "file:///fake.img.tar.gz", "metadata": {}} self.image.locations.append(location) self.assertEqual(1, len(self.image.locations)) def test_add_too_many_image_locations(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}} exc = self.assertRaises(exception.ImageLocationLimitExceeded, self.image.locations.append, location2) self.assertIn('Attempted: 2, Maximum: 1', encodeutils.exception_to_unicode(exc)) def test_add_unlimited_image_locations(self): self.config(image_location_quota=-1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) self.assertEqual(1, len(self.image.locations)) def test_remove_image_location_while_over_quota(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) self.assertEqual(1, 
len(self.image.locations)) self.config(image_location_quota=0) self.image.locations.remove(location1) self.assertEqual(0, len(self.image.locations)) glance-12.0.0/glance/tests/unit/utils.py0000664000567000056710000002373412701407047021263 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography import exceptions as crypto_exception from debtcollector import removals import glance_store as store import mock from oslo_config import cfg from oslo_log import log as logging from six.moves import urllib from glance.common import exception from glance.common import store_utils from glance.common import wsgi import glance.context import glance.db.simple.api as simple_db CONF = cfg.CONF LOG = logging.getLogger(__name__) UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754' USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd' BASE_URI = 'http://storeurl.com/container' def sort_url_by_qs_keys(url): # NOTE(kragniz): this only sorts the keys of the query string of a url. # For example, an input of '/v2/tasks?sort_key=id&sort_dir=asc&limit=10' # returns '/v2/tasks?limit=10&sort_dir=asc&sort_key=id'. 
This is to prevent # non-deterministic ordering of the query string causing problems with unit # tests. parsed = urllib.parse.urlparse(url) queries = urllib.parse.parse_qsl(parsed.query, True) sorted_query = sorted(queries, key=lambda x: x[0]) encoded_sorted_query = urllib.parse.urlencode(sorted_query, True) url_parts = (parsed.scheme, parsed.netloc, parsed.path, parsed.params, encoded_sorted_query, parsed.fragment) return urllib.parse.urlunparse(url_parts) def get_fake_request(path='', method='POST', is_admin=False, user=USER1, roles=None, tenant=TENANT1): if roles is None: roles = ['member'] req = wsgi.Request.blank(path) req.method = method kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } req.context = glance.context.RequestContext(**kwargs) return req def fake_get_size_from_backend(uri, context=None): return 1 @removals.remove(message="This will be removed in the N cycle.") def fake_old_verify_signature(context, checksum_hash, image_properties): if (image_properties is not None and 'signature' in image_properties and image_properties['signature'] == 'VALID'): return True else: raise exception.SignatureVerificationError( 'Signature verification failed.') def fake_get_verifier(context, image_properties): verifier = mock.Mock() if (image_properties is not None and 'img_signature' in image_properties and image_properties['img_signature'] == 'VALID'): verifier.verify.return_value = None else: ex = crypto_exception.InvalidSignature() verifier.verify.side_effect = ex return verifier class FakeDB(object): def __init__(self, initialize=True): self.reset() if initialize: self.init_db() @staticmethod def init_db(): images = [ {'id': UUID1, 'owner': TENANT1, 'status': 'queued', 'locations': [{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'queued'}]}, {'id': UUID2, 'owner': TENANT1, 'status': 'queued'}, ] [simple_db.image_create(None, image) for image in images] members = [ {'image_id': UUID1, 'member': TENANT1, 'can_share': 
True}, {'image_id': UUID1, 'member': TENANT2, 'can_share': False}, ] [simple_db.image_member_create(None, member) for member in members] simple_db.image_tag_set_all(None, UUID1, ['ping', 'pong']) @staticmethod def reset(): simple_db.reset() def __getattr__(self, key): return getattr(simple_db, key) class FakeStoreUtils(object): def __init__(self, store_api): self.store_api = store_api def safe_delete_from_backend(self, context, id, location): try: del self.store_api.data[location['url']] except KeyError: pass def schedule_delayed_delete_from_backend(self, context, id, location): pass def delete_image_location_from_backend(self, context, image_id, location): if CONF.delayed_delete: self.schedule_delayed_delete_from_backend(context, image_id, location) else: self.safe_delete_from_backend(context, image_id, location) def validate_external_location(self, uri): if uri and urllib.parse.urlparse(uri).scheme: return store_utils.validate_external_location(uri) else: return True class FakeStoreAPI(object): def __init__(self, store_metadata=None): self.data = { '%s/%s' % (BASE_URI, UUID1): ('XXX', 3), '%s/fake_location' % (BASE_URI): ('YYY', 3) } self.acls = {} if store_metadata is None: self.store_metadata = {} else: self.store_metadata = store_metadata def create_stores(self): pass def set_acls(self, uri, public=False, read_tenants=None, write_tenants=None, context=None): if read_tenants is None: read_tenants = [] if write_tenants is None: write_tenants = [] self.acls[uri] = { 'public': public, 'read': read_tenants, 'write': write_tenants, } def get_from_backend(self, location, offset=0, chunk_size=None, context=None): try: scheme = location[:location.find('/') - 1] if scheme == 'unknown': raise store.UnknownScheme(scheme=scheme) return self.data[location] except KeyError: raise store.NotFound(image=location) def get_size_from_backend(self, location, context=None): return self.get_from_backend(location, context=context)[1] def add_to_backend(self, conf, image_id, data, 
size, scheme=None, context=None, verifier=None): store_max_size = 7 current_store_size = 2 for location in self.data.keys(): if image_id in location: raise exception.Duplicate() if not size: # 'data' is a string wrapped in a LimitingReader|CooperativeReader # pipeline, so peek under the hood of those objects to get at the # string itself. size = len(data.data.fd) if (current_store_size + size) > store_max_size: raise exception.StorageFull() if context.user == USER2: raise exception.Forbidden() if context.user == USER3: raise exception.StorageWriteDenied() self.data[image_id] = (data, size) checksum = 'Z' return (image_id, size, checksum, self.store_metadata) def check_location_metadata(self, val, key=''): store.check_location_metadata(val) class FakePolicyEnforcer(object): def __init__(self, *_args, **kwargs): self.rules = {} def enforce(self, _ctxt, action, target=None, **kwargs): """Raise Forbidden if a rule for given action is set to false.""" if self.rules.get(action) is False: raise exception.Forbidden() def set_rules(self, rules): self.rules = rules class FakeNotifier(object): def __init__(self, *_args, **kwargs): self.log = [] def _notify(self, event_type, payload, level): log = { 'notification_type': level, 'event_type': event_type, 'payload': payload } self.log.append(log) def warn(self, event_type, payload): self._notify(event_type, payload, 'WARN') def info(self, event_type, payload): self._notify(event_type, payload, 'INFO') def error(self, event_type, payload): self._notify(event_type, payload, 'ERROR') def debug(self, event_type, payload): self._notify(event_type, payload, 'DEBUG') def critical(self, event_type, payload): self._notify(event_type, payload, 'CRITICAL') def get_logs(self): return self.log class FakeGateway(object): def __init__(self, image_factory=None, image_member_factory=None, image_repo=None, task_factory=None, task_repo=None): self.image_factory = image_factory self.image_member_factory = image_member_factory self.image_repo = 
image_repo self.task_factory = task_factory self.task_repo = task_repo def get_image_factory(self, context): return self.image_factory def get_image_member_factory(self, context): return self.image_member_factory def get_repo(self, context): return self.image_repo def get_task_factory(self, context): return self.task_factory def get_task_repo(self, context): return self.task_repo class FakeTask(object): def __init__(self, task_id, type=None, status=None): self.task_id = task_id self.type = type self.message = None self.input = None self._status = status self._executor = None def success(self, result): self.result = result self._status = 'success' def fail(self, message): self.message = message self._status = 'failure' glance-12.0.0/glance/tests/unit/__init__.py0000664000567000056710000000000012701407047021637 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/test_schema.py0000664000567000056710000001346112701407047022416 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import exception import glance.schema from glance.tests import utils as test_utils class TestBasicSchema(test_utils.BaseTestCase): def setUp(self): super(TestBasicSchema, self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } self.schema = glance.schema.Schema('basic', properties) def test_validate_passes(self): obj = {'ham': 'no', 'eggs': 'scrambled'} self.schema.validate(obj) # No exception raised def test_validate_fails_on_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_validate_fails_on_bad_type(self): obj = {'eggs': 2} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_filter_strips_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} filtered = self.schema.filter(obj) expected = {'ham': 'virginia', 'eggs': 'scrambled'} self.assertEqual(expected, filtered) def test_merge_properties(self): self.schema.merge_properties({'bacon': {'type': 'string'}}) expected = set(['ham', 'eggs', 'bacon']) actual = set(self.schema.raw()['properties'].keys()) self.assertEqual(expected, actual) def test_merge_conflicting_properties(self): conflicts = {'eggs': {'type': 'integer'}} self.assertRaises(exception.SchemaLoadError, self.schema.merge_properties, conflicts) def test_merge_conflicting_but_identical_properties(self): conflicts = {'ham': {'type': 'string'}} self.schema.merge_properties(conflicts) # no exception raised expected = set(['ham', 'eggs']) actual = set(self.schema.raw()['properties'].keys()) self.assertEqual(expected, actual) def test_raw_json_schema(self): expected = { 'name': 'basic', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'additionalProperties': False, } self.assertEqual(expected, self.schema.raw()) class TestBasicSchemaLinks(test_utils.BaseTestCase): def setUp(self): super(TestBasicSchemaLinks, 
self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } links = [ {'rel': 'up', 'href': '/menu'}, ] self.schema = glance.schema.Schema('basic', properties, links) def test_raw_json_schema(self): expected = { 'name': 'basic', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'links': [ {'rel': 'up', 'href': '/menu'}, ], 'additionalProperties': False, } self.assertEqual(expected, self.schema.raw()) class TestPermissiveSchema(test_utils.BaseTestCase): def setUp(self): super(TestPermissiveSchema, self).setUp() properties = { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, } self.schema = glance.schema.PermissiveSchema('permissive', properties) def test_validate_with_additional_properties_allowed(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} self.schema.validate(obj) # No exception raised def test_validate_rejects_non_string_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'grits': 1000} self.assertRaises(exception.InvalidObject, self.schema.validate, obj) def test_filter_passes_extra_properties(self): obj = {'ham': 'virginia', 'eggs': 'scrambled', 'bacon': 'crispy'} filtered = self.schema.filter(obj) self.assertEqual(obj, filtered) def test_raw_json_schema(self): expected = { 'name': 'permissive', 'properties': { 'ham': {'type': 'string'}, 'eggs': {'type': 'string'}, }, 'additionalProperties': {'type': 'string'}, } self.assertEqual(expected, self.schema.raw()) class TestCollectionSchema(test_utils.BaseTestCase): def test_raw_json_schema(self): item_properties = {'cheese': {'type': 'string'}} item_schema = glance.schema.Schema('mouse', item_properties) collection_schema = glance.schema.CollectionSchema('mice', item_schema) expected = { 'name': 'mice', 'properties': { 'mice': { 'type': 'array', 'items': item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': '{first}'}, 
{'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } self.assertEqual(expected, collection_schema.raw()) glance-12.0.0/glance/tests/unit/test_migrations.py0000664000567000056710000023140512701407047023332 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations run a series of test cases to ensure that migrations work properly both upgrading and downgrading, and that no data loss occurs if possible. 
""" from __future__ import print_function import datetime import os import pickle import uuid from migrate.versioning import api as migration_api from migrate.versioning.repository import Repository from oslo_config import cfg from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils as db_utils from oslo_serialization import jsonutils from oslo_utils import uuidutils # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import sqlalchemy from sqlalchemy import inspect import sqlalchemy.types as types from glance.common import crypt from glance.common import exception from glance.common import timeutils from glance.db import migration from glance.db.sqlalchemy import migrate_repo from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import from glance.db.sqlalchemy import models from glance.db.sqlalchemy import models_glare from glance.db.sqlalchemy import models_metadef from glance.i18n import _ CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'glance.common.config') def index_exist(index, table, engine): inspector = sqlalchemy.inspect(engine) return index in [i['name'] for i in inspector.get_indexes(table)] def unique_constraint_exist(constraint, table, engine): inspector = sqlalchemy.inspect(engine) return constraint in [c['name'] for c in inspector.get_unique_constraints(table)] class MigrationsMixin(test_migrations.WalkVersionsMixin): @property def INIT_VERSION(self): return migration.INIT_VERSION @property def REPOSITORY(self): migrate_file = migrate_repo.__file__ return Repository(os.path.abspath(os.path.dirname(migrate_file))) @property def migration_api(self): return migration_api @property def migrate_engine(self): return self.engine def test_walk_versions(self): # No more downgrades self._walk_versions(False, False) def _create_unversioned_001_db(self, engine): # Create the initial version of the images table meta = 
sqlalchemy.schema.MetaData() meta.bind = engine images_001 = sqlalchemy.Table('images', meta, sqlalchemy.Column('id', models.Integer, primary_key=True), sqlalchemy.Column('name', sqlalchemy.String(255) ), sqlalchemy.Column('type', sqlalchemy.String(30)), sqlalchemy.Column('size', sqlalchemy.Integer), sqlalchemy.Column('status', sqlalchemy.String(30)), sqlalchemy.Column('is_public', sqlalchemy.Boolean, default=False), sqlalchemy.Column('location', sqlalchemy.Text), sqlalchemy.Column('created_at', sqlalchemy.DateTime(), nullable=False), sqlalchemy.Column('updated_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted', sqlalchemy.Boolean(), nullable=False, default=False), mysql_engine='InnoDB', mysql_charset='utf8') images_001.create() def test_version_control_existing_db(self): """ Creates a DB without version control information, places it under version control and checks that it can be upgraded without errors. """ self._create_unversioned_001_db(self.migrate_engine) old_version = migration.INIT_VERSION # we must start from version 1 migration.INIT_VERSION = 1 self.addCleanup(setattr, migration, 'INIT_VERSION', old_version) self._walk_versions(False, False) def _pre_upgrade_003(self, engine): now = datetime.datetime.now() images = db_utils.get_table(engine, 'images') data = {'deleted': False, 'created_at': now, 'updated_at': now, 'type': 'kernel', 'status': 'active', 'is_public': True} images.insert().values(data).execute() return data def _check_003(self, engine, data): images = db_utils.get_table(engine, 'images') self.assertNotIn('type', images.c, "'type' column found in images table columns! 
" "images table columns reported by metadata: %s\n" % images.c.keys()) images_prop = db_utils.get_table(engine, 'image_properties') result = images_prop.select().execute() types = [] for row in result: if row['key'] == 'type': types.append(row['value']) self.assertIn(data['type'], types) def _pre_upgrade_004(self, engine): """Insert checksum data sample to check if migration goes fine with data. """ now = timeutils.utcnow() images = db_utils.get_table(engine, 'images') data = [ { 'deleted': False, 'created_at': now, 'updated_at': now, 'type': 'kernel', 'status': 'active', 'is_public': True, } ] engine.execute(images.insert(), data) return data def _check_004(self, engine, data): """Assure that checksum data is present on table""" images = db_utils.get_table(engine, 'images') self.assertIn('checksum', images.c) self.assertEqual(32, images.c['checksum'].type.length) def _pre_upgrade_005(self, engine): now = timeutils.utcnow() images = db_utils.get_table(engine, 'images') data = [ { 'deleted': False, 'created_at': now, 'updated_at': now, 'type': 'kernel', 'status': 'active', 'is_public': True, # Integer type signed size limit 'size': 2147483647 } ] engine.execute(images.insert(), data) return data def _check_005(self, engine, data): images = db_utils.get_table(engine, 'images') select = images.select().execute() sizes = [row['size'] for row in select if row['size'] is not None] migrated_data_sizes = [element['size'] for element in data] for migrated in migrated_data_sizes: self.assertIn(migrated, sizes) def _pre_upgrade_006(self, engine): now = timeutils.utcnow() images = db_utils.get_table(engine, 'images') image_data = [ { 'deleted': False, 'created_at': now, 'updated_at': now, 'type': 'kernel', 'status': 'active', 'is_public': True, 'id': 9999, } ] engine.execute(images.insert(), image_data) images_properties = db_utils.get_table(engine, 'image_properties') properties_data = [ { 'id': 10, 'image_id': 9999, 'updated_at': now, 'created_at': now, 'deleted': False, 
'key': 'image_name' } ] engine.execute(images_properties.insert(), properties_data) return properties_data def _check_006(self, engine, data): images_properties = db_utils.get_table(engine, 'image_properties') select = images_properties.select().execute() # load names from name collumn image_names = [row['name'] for row in select] # check names from data in image names from name collumn for element in data: self.assertIn(element['key'], image_names) def _pre_upgrade_010(self, engine): """Test rows in images with NULL updated_at get updated to equal created_at. """ initial_values = [ (datetime.datetime(1999, 1, 2, 4, 10, 20), datetime.datetime(1999, 1, 2, 4, 10, 30)), (datetime.datetime(1999, 2, 4, 6, 15, 25), datetime.datetime(1999, 2, 4, 6, 15, 35)), (datetime.datetime(1999, 3, 6, 8, 20, 30), None), (datetime.datetime(1999, 4, 8, 10, 25, 35), None), ] images = db_utils.get_table(engine, 'images') for created_at, updated_at in initial_values: row = dict(deleted=False, created_at=created_at, updated_at=updated_at, status='active', is_public=True, min_disk=0, min_ram=0) images.insert().values(row).execute() return initial_values def _check_010(self, engine, data): values = {c: u for c, u in data} images = db_utils.get_table(engine, 'images') for row in images.select().execute(): if row['created_at'] in values: # updated_at should be unchanged if not previous NULL, or # set to created_at if previously NULL updated_at = values.pop(row['created_at']) or row['created_at'] self.assertEqual(row['updated_at'], updated_at) # No initial values should be remaining self.assertEqual(0, len(values)) def _pre_upgrade_012(self, engine): """Test rows in images have id changes from int to varchar(32) and value changed from int to UUID. Also test image_members and image_properties gets updated to point to new UUID keys. 
""" images = db_utils.get_table(engine, 'images') image_members = db_utils.get_table(engine, 'image_members') image_properties = db_utils.get_table(engine, 'image_properties') # Insert kernel, ramdisk and normal images now = timeutils.utcnow() data = {'created_at': now, 'updated_at': now, 'status': 'active', 'deleted': False, 'is_public': True, 'min_disk': 0, 'min_ram': 0} test_data = {} for name in ('kernel', 'ramdisk', 'normal'): data['name'] = '%s migration 012 test' % name result = images.insert().values(data).execute() test_data[name] = result.inserted_primary_key[0] # Insert image_members and image_properties rows data = {'created_at': now, 'updated_at': now, 'deleted': False, 'image_id': test_data['normal'], 'member': 'foobar', 'can_share': False} result = image_members.insert().values(data).execute() test_data['member'] = result.inserted_primary_key[0] data = {'created_at': now, 'updated_at': now, 'deleted': False, 'image_id': test_data['normal'], 'name': 'ramdisk_id', 'value': test_data['ramdisk']} result = image_properties.insert().values(data).execute() test_data['properties'] = [result.inserted_primary_key[0]] data.update({'name': 'kernel_id', 'value': test_data['kernel']}) result = image_properties.insert().values(data).execute() test_data['properties'].append(result.inserted_primary_key) return test_data def _check_012(self, engine, test_data): images = db_utils.get_table(engine, 'images') image_members = db_utils.get_table(engine, 'image_members') image_properties = db_utils.get_table(engine, 'image_properties') # Find kernel, ramdisk and normal images. 
Make sure id has been # changed to a uuid uuids = {} for name in ('kernel', 'ramdisk', 'normal'): image_name = '%s migration 012 test' % name rows = images.select().where( images.c.name == image_name).execute().fetchall() self.assertEqual(1, len(rows)) row = rows[0] self.assertTrue(uuidutils.is_uuid_like(row['id'])) uuids[name] = row['id'] # Find all image_members to ensure image_id has been updated results = image_members.select().where( image_members.c.image_id == uuids['normal']).execute().fetchall() self.assertEqual(1, len(results)) # Find all image_properties to ensure image_id has been updated # as well as ensure kernel_id and ramdisk_id values have been # updated too results = image_properties.select().where( image_properties.c.image_id == uuids['normal'] ).execute().fetchall() self.assertEqual(2, len(results)) for row in results: self.assertIn(row['name'], ('kernel_id', 'ramdisk_id')) if row['name'] == 'kernel_id': self.assertEqual(row['value'], uuids['kernel']) if row['name'] == 'ramdisk_id': self.assertEqual(row['value'], uuids['ramdisk']) def _post_downgrade_012(self, engine): images = db_utils.get_table(engine, 'images') image_members = db_utils.get_table(engine, 'image_members') image_properties = db_utils.get_table(engine, 'image_properties') # Find kernel, ramdisk and normal images. 
Make sure id has been # changed back to an integer ids = {} for name in ('kernel', 'ramdisk', 'normal'): image_name = '%s migration 012 test' % name rows = images.select().where( images.c.name == image_name).execute().fetchall() self.assertEqual(1, len(rows)) row = rows[0] self.assertFalse(uuidutils.is_uuid_like(row['id'])) ids[name] = row['id'] # Find all image_members to ensure image_id has been updated results = image_members.select().where( image_members.c.image_id == ids['normal']).execute().fetchall() self.assertEqual(1, len(results)) # Find all image_properties to ensure image_id has been updated # as well as ensure kernel_id and ramdisk_id values have been # updated too results = image_properties.select().where( image_properties.c.image_id == ids['normal']).execute().fetchall() self.assertEqual(2, len(results)) for row in results: self.assertIn(row['name'], ('kernel_id', 'ramdisk_id')) if row['name'] == 'kernel_id': self.assertEqual(row['value'], str(ids['kernel'])) if row['name'] == 'ramdisk_id': self.assertEqual(row['value'], str(ids['ramdisk'])) def _assert_invalid_swift_uri_raises_bad_store_uri(self, legacy_parse_uri_fn): invalid_uri = ('swift://http://acct:usr:pass@example.com' '/container/obj-id') # URI cannot contain more than one occurrence of a scheme. 
self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_uri, True) invalid_scheme_uri = ('http://acct:usr:pass@example.com' '/container/obj-id') self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_scheme_uri, True) invalid_account_missing_uri = 'swift+http://container/obj-id' # Badly formed S3 URI: swift+http://container/obj-id self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_account_missing_uri, True) invalid_container_missing_uri = ('swift+http://' 'acct:usr:pass@example.com/obj-id') # Badly formed S3 URI: swift+http://acct:usr:pass@example.com/obj-id self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_container_missing_uri, True) invalid_object_missing_uri = ('swift+http://' 'acct:usr:pass@example.com/container') # Badly formed S3 URI: swift+http://acct:usr:pass@example.com/container self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_object_missing_uri, True) invalid_user_without_pass_uri = ('swift://acctusr@example.com' '/container/obj-id') # Badly formed credentials '%(creds)s' in Swift URI self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_user_without_pass_uri, True) # Badly formed credentials in Swift URI. 
self.assertRaises(exception.BadStoreUri, legacy_parse_uri_fn, invalid_user_without_pass_uri, False) def test_legacy_parse_swift_uri_015(self): (legacy_parse_uri,) = from_migration_import( '015_quote_swift_credentials', ['legacy_parse_uri']) uri = legacy_parse_uri( 'swift://acct:usr:pass@example.com/container/obj-id', True) self.assertTrue(uri, 'swift://acct%3Ausr:pass@example.com' '/container/obj-id') self._assert_invalid_swift_uri_raises_bad_store_uri(legacy_parse_uri) def _pre_upgrade_015(self, engine): images = db_utils.get_table(engine, 'images') unquoted_locations = [ 'swift://acct:usr:pass@example.com/container/obj-id', 'file://foo', ] now = datetime.datetime.now() temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0) data = [] for i, location in enumerate(unquoted_locations): temp.update(location=location, id=str(uuid.uuid4())) data.append(temp) images.insert().values(temp).execute() return data def _check_015(self, engine, data): images = db_utils.get_table(engine, 'images') quoted_locations = [ 'swift://acct%3Ausr:pass@example.com/container/obj-id', 'file://foo', ] result = images.select().execute() locations = list(map(lambda x: x['location'], result)) for loc in quoted_locations: self.assertIn(loc, locations) def _pre_upgrade_016(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='fake-image-id1') images.insert().values(temp).execute() image_members = db_utils.get_table(engine, 'image_members') now = datetime.datetime.now() data = {'deleted': False, 'created_at': now, 'member': 'fake-member', 'updated_at': now, 'can_share': False, 'image_id': 'fake-image-id1'} image_members.insert().values(data).execute() return data def _check_016(self, engine, data): image_members = db_utils.get_table(engine, 'image_members') self.assertIn('status', 
image_members.c, "'status' column found in image_members table " "columns! image_members table columns: %s" % image_members.c.keys()) def test_legacy_parse_swift_uri_017(self): metadata_encryption_key = 'a' * 16 CONF.set_override('metadata_encryption_key', metadata_encryption_key, enforce_type=True) self.addCleanup(CONF.reset) (legacy_parse_uri, encrypt_location) = from_migration_import( '017_quote_encrypted_swift_credentials', ['legacy_parse_uri', 'encrypt_location']) uri = legacy_parse_uri('swift://acct:usr:pass@example.com' '/container/obj-id', True) self.assertTrue(uri, encrypt_location( 'swift://acct%3Ausr:pass@example.com/container/obj-id')) self._assert_invalid_swift_uri_raises_bad_store_uri(legacy_parse_uri) def _pre_upgrade_017(self, engine): metadata_encryption_key = 'a' * 16 CONF.set_override('metadata_encryption_key', metadata_encryption_key, enforce_type=True) self.addCleanup(CONF.reset) images = db_utils.get_table(engine, 'images') unquoted = 'swift://acct:usr:pass@example.com/container/obj-id' encrypted_unquoted = crypt.urlsafe_encrypt( metadata_encryption_key, unquoted, 64) data = [] now = datetime.datetime.now() temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0, location=encrypted_unquoted, id='fakeid1') images.insert().values(temp).execute() locations = [ 'file://ab', 'file://abc', 'swift://acct3A%foobar:pass@example.com/container/obj-id2' ] now = datetime.datetime.now() temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0) for i, location in enumerate(locations): temp.update(location=location, id=str(uuid.uuid4())) data.append(temp) images.insert().values(temp).execute() return data def _check_017(self, engine, data): metadata_encryption_key = 'a' * 16 quoted = 'swift://acct%3Ausr:pass@example.com/container/obj-id' images = db_utils.get_table(engine, 'images') result = images.select().execute() locations = list(map(lambda 
x: x['location'], result)) actual_location = [] for location in locations: if location: try: temp_loc = crypt.urlsafe_decrypt(metadata_encryption_key, location) actual_location.append(temp_loc) except TypeError: actual_location.append(location) except ValueError: actual_location.append(location) self.assertIn(quoted, actual_location) loc_list = ['file://ab', 'file://abc', 'swift://acct3A%foobar:pass@example.com/container/obj-id2'] for location in loc_list: if location not in actual_location: self.fail(_("location: %s data lost") % location) def _pre_upgrade_019(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() base_values = { 'deleted': False, 'created_at': now, 'updated_at': now, 'status': 'active', 'is_public': True, 'min_disk': 0, 'min_ram': 0, } data = [ {'id': 'fake-19-1', 'location': 'http://glance.example.com'}, # NOTE(bcwaldon): images with a location of None should # not be migrated {'id': 'fake-19-2', 'location': None}, ] for image in data: image.update(base_values) images.insert().values(image).execute() return data def _check_019(self, engine, data): image_locations = db_utils.get_table(engine, 'image_locations') records = image_locations.select().execute().fetchall() locations = {il.image_id: il.value for il in records} self.assertEqual('http://glance.example.com', locations.get('fake-19-1')) def _check_020(self, engine, data): images = db_utils.get_table(engine, 'images') self.assertNotIn('location', images.c) def _pre_upgrade_026(self, engine): image_locations = db_utils.get_table(engine, 'image_locations') now = datetime.datetime.now() image_id = 'fake_id' url = 'file:///some/place/onthe/fs' images = db_utils.get_table(engine, 'images') temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id=image_id) images.insert().values(temp).execute() temp = dict(deleted=False, created_at=now, updated_at=now, image_id=image_id, value=url) 
image_locations.insert().values(temp).execute() return image_id def _check_026(self, engine, data): image_locations = db_utils.get_table(engine, 'image_locations') results = image_locations.select().where( image_locations.c.image_id == data).execute() r = list(results) self.assertEqual(1, len(r)) self.assertEqual('file:///some/place/onthe/fs', r[0]['value']) self.assertIn('meta_data', r[0]) x = pickle.loads(r[0]['meta_data']) self.assertEqual({}, x) def _check_027(self, engine, data): table = "images" index = "checksum_image_idx" columns = ["checksum"] meta = sqlalchemy.MetaData() meta.bind = engine new_table = sqlalchemy.Table(table, meta, autoload=True) index_data = [(idx.name, idx.columns.keys()) for idx in new_table.indexes] self.assertIn((index, columns), index_data) def _check_028(self, engine, data): owner_index = "owner_image_idx" columns = ["owner"] images_table = db_utils.get_table(engine, 'images') index_data = [(idx.name, idx.columns.keys()) for idx in images_table.indexes if idx.name == owner_index] self.assertIn((owner_index, columns), index_data) def _post_downgrade_028(self, engine): owner_index = "owner_image_idx" columns = ["owner"] images_table = db_utils.get_table(engine, 'images') index_data = [(idx.name, idx.columns.keys()) for idx in images_table.indexes if idx.name == owner_index] self.assertNotIn((owner_index, columns), index_data) def _pre_upgrade_029(self, engine): image_locations = db_utils.get_table(engine, 'image_locations') meta_data = {'somelist': ['a', 'b', 'c'], 'avalue': 'hello', 'adict': {}} now = datetime.datetime.now() image_id = 'fake_029_id' url = 'file:///some/place/onthe/fs029' images = db_utils.get_table(engine, 'images') temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id=image_id) images.insert().values(temp).execute() pickle_md = pickle.dumps(meta_data) temp = dict(deleted=False, created_at=now, updated_at=now, image_id=image_id, value=url, 
meta_data=pickle_md) image_locations.insert().values(temp).execute() return meta_data, image_id def _check_029(self, engine, data): meta_data = data[0] image_id = data[1] image_locations = db_utils.get_table(engine, 'image_locations') records = image_locations.select().where( image_locations.c.image_id == image_id).execute().fetchall() for r in records: d = jsonutils.loads(r['meta_data']) self.assertEqual(d, meta_data) def _post_downgrade_029(self, engine): image_id = 'fake_029_id' image_locations = db_utils.get_table(engine, 'image_locations') records = image_locations.select().where( image_locations.c.image_id == image_id).execute().fetchall() for r in records: md = r['meta_data'] d = pickle.loads(md) self.assertIsInstance(d, dict) def _check_030(self, engine, data): table = "tasks" index_type = ('ix_tasks_type', ['type']) index_status = ('ix_tasks_status', ['status']) index_owner = ('ix_tasks_owner', ['owner']) index_deleted = ('ix_tasks_deleted', ['deleted']) index_updated_at = ('ix_tasks_updated_at', ['updated_at']) meta = sqlalchemy.MetaData() meta.bind = engine tasks_table = sqlalchemy.Table(table, meta, autoload=True) index_data = [(idx.name, idx.columns.keys()) for idx in tasks_table.indexes] self.assertIn(index_type, index_data) self.assertIn(index_status, index_data) self.assertIn(index_owner, index_data) self.assertIn(index_deleted, index_data) self.assertIn(index_updated_at, index_data) expected = [u'id', u'type', u'status', u'owner', u'input', u'result', u'message', u'expires_at', u'created_at', u'updated_at', u'deleted_at', u'deleted'] # NOTE(flwang): Skip the column type checking for now since Jenkins is # using sqlalchemy.dialects.postgresql.base.TIMESTAMP instead of # DATETIME which is using by mysql and sqlite. 
col_data = [col.name for col in tasks_table.columns] self.assertEqual(expected, col_data) def _post_downgrade_030(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'tasks') def _pre_upgrade_031(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() image_id = 'fake_031_id' temp = dict(deleted=False, created_at=now, updated_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id=image_id) images.insert().values(temp).execute() locations_table = db_utils.get_table(engine, 'image_locations') locations = [ ('file://ab', '{"a": "yo yo"}'), ('file://ab', '{}'), ('file://ab', '{}'), ('file://ab1', '{"a": "that one, please"}'), ('file://ab1', '{"a": "that one, please"}'), ] temp = dict(deleted=False, created_at=now, updated_at=now, image_id=image_id) for location, metadata in locations: temp.update(value=location, meta_data=metadata) locations_table.insert().values(temp).execute() return image_id def _check_031(self, engine, image_id): locations_table = db_utils.get_table(engine, 'image_locations') result = locations_table.select().where( locations_table.c.image_id == image_id).execute().fetchall() locations = set([(x['value'], x['meta_data']) for x in result]) actual_locations = set([ ('file://ab', '{"a": "yo yo"}'), ('file://ab', '{}'), ('file://ab1', '{"a": "that one, please"}'), ]) self.assertFalse(actual_locations.symmetric_difference(locations)) def _pre_upgrade_032(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'task_info') tasks = db_utils.get_table(engine, 'tasks') now = datetime.datetime.now() base_values = { 'deleted': False, 'created_at': now, 'updated_at': now, 'status': 'active', 'owner': 'TENANT', 'type': 'import', } data = [ { 'id': 'task-1', 'input': 'some input', 'message': None, 'result': 'successful' }, { 'id': 'task-2', 'input': None, 'message': None, 'result': None }, ] for task in data: task.update(base_values) 
tasks.insert().values(task).execute() return data def _check_032(self, engine, data): task_info_table = db_utils.get_table(engine, 'task_info') task_info_refs = task_info_table.select().execute().fetchall() self.assertEqual(2, len(task_info_refs)) for x in range(len(task_info_refs)): self.assertEqual(task_info_refs[x].task_id, data[x]['id']) self.assertEqual(task_info_refs[x].input, data[x]['input']) self.assertEqual(task_info_refs[x].result, data[x]['result']) self.assertIsNone(task_info_refs[x].message) tasks_table = db_utils.get_table(engine, 'tasks') self.assertNotIn('input', tasks_table.c) self.assertNotIn('result', tasks_table.c) self.assertNotIn('message', tasks_table.c) def _post_downgrade_032(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'task_info') tasks_table = db_utils.get_table(engine, 'tasks') records = tasks_table.select().execute().fetchall() self.assertEqual(2, len(records)) tasks = {t.id: t for t in records} task_1 = tasks.get('task-1') self.assertEqual('some input', task_1.input) self.assertEqual('successful', task_1.result) self.assertIsNone(task_1.message) task_2 = tasks.get('task-2') self.assertIsNone(task_2.input) self.assertIsNone(task_2.result) self.assertIsNone(task_2.message) def _pre_upgrade_033(self, engine): images = db_utils.get_table(engine, 'images') image_locations = db_utils.get_table(engine, 'image_locations') now = datetime.datetime.now() image_id = 'fake_id_028_%d' url = 'file:///some/place/onthe/fs_%d' status_list = ['active', 'saving', 'queued', 'killed', 'pending_delete', 'deleted'] image_id_list = [] for (idx, status) in enumerate(status_list): temp = dict(deleted=False, created_at=now, updated_at=now, status=status, is_public=True, min_disk=0, min_ram=0, id=image_id % idx) images.insert().values(temp).execute() temp = dict(deleted=False, created_at=now, updated_at=now, image_id=image_id % idx, value=url % idx) image_locations.insert().values(temp).execute() 
image_id_list.append(image_id % idx) return image_id_list def _check_033(self, engine, data): image_locations = db_utils.get_table(engine, 'image_locations') self.assertIn('status', image_locations.c) self.assertEqual(30, image_locations.c['status'].type.length) status_list = ['active', 'active', 'active', 'deleted', 'pending_delete', 'deleted'] for (idx, image_id) in enumerate(data): results = image_locations.select().where( image_locations.c.image_id == image_id).execute() r = list(results) self.assertEqual(1, len(r)) self.assertIn('status', r[0]) self.assertEqual(status_list[idx], r[0]['status']) def _post_downgrade_033(self, engine): image_locations = db_utils.get_table(engine, 'image_locations') self.assertNotIn('status', image_locations.c) def _pre_upgrade_034(self, engine): images = db_utils.get_table(engine, 'images') now = datetime.datetime.now() image_id = 'fake_id_034' temp = dict(deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id=image_id) images.insert().values(temp).execute() def _check_034(self, engine, data): images = db_utils.get_table(engine, 'images') self.assertIn('virtual_size', images.c) result = (images.select() .where(images.c.id == 'fake_id_034') .execute().fetchone()) self.assertIsNone(result.virtual_size) def _post_downgrade_034(self, engine): images = db_utils.get_table(engine, 'images') self.assertNotIn('virtual_size', images.c) def _pre_upgrade_035(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_namespaces') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_properties') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_objects') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_resource_types') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_namespace_resource_types') def _check_035(self, 
engine, data): meta = sqlalchemy.MetaData() meta.bind = engine # metadef_namespaces table = sqlalchemy.Table("metadef_namespaces", meta, autoload=True) index_namespace = ('ix_namespaces_namespace', ['namespace']) index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes] self.assertIn(index_namespace, index_data) expected_cols = [u'id', u'namespace', u'display_name', u'description', u'visibility', u'protected', u'owner', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_objects table = sqlalchemy.Table("metadef_objects", meta, autoload=True) index_namespace_id_name = ( 'ix_objects_namespace_id_name', ['namespace_id', 'name']) index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes] self.assertIn(index_namespace_id_name, index_data) expected_cols = [u'id', u'namespace_id', u'name', u'description', u'required', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_properties table = sqlalchemy.Table("metadef_properties", meta, autoload=True) index_namespace_id_name = ( 'ix_metadef_properties_namespace_id_name', ['namespace_id', 'name']) index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes] self.assertIn(index_namespace_id_name, index_data) expected_cols = [u'id', u'namespace_id', u'name', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_resource_types table = sqlalchemy.Table( "metadef_resource_types", meta, autoload=True) index_resource_types_name = ( 'ix_metadef_resource_types_name', ['name']) index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes] self.assertIn(index_resource_types_name, index_data) expected_cols = [u'id', u'name', u'protected', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, 
col_data) # metadef_namespace_resource_types table = sqlalchemy.Table( "metadef_namespace_resource_types", meta, autoload=True) index_ns_res_types_res_type_id_ns_id = ( 'ix_metadef_ns_res_types_res_type_id_ns_id', ['resource_type_id', 'namespace_id']) index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes] self.assertIn(index_ns_res_types_res_type_id_ns_id, index_data) expected_cols = [u'resource_type_id', u'namespace_id', u'properties_target', u'prefix', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) def _post_downgrade_035(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_namespaces') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_properties') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_objects') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_resource_types') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_namespace_resource_types') def _pre_upgrade_036(self, engine): meta = sqlalchemy.MetaData() meta.bind = engine # metadef_objects table = sqlalchemy.Table("metadef_objects", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'description', u'required', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_properties table = sqlalchemy.Table("metadef_properties", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) def _check_036(self, engine, data): meta = sqlalchemy.MetaData() meta.bind = engine # metadef_objects table = sqlalchemy.Table("metadef_objects", meta, autoload=True) expected_cols = [u'id', 
u'namespace_id', u'name', u'description', u'required', u'json_schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_properties table = sqlalchemy.Table("metadef_properties", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'json_schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) def _post_downgrade_036(self, engine): meta = sqlalchemy.MetaData() meta.bind = engine # metadef_objects table = sqlalchemy.Table("metadef_objects", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'description', u'required', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) # metadef_properties table = sqlalchemy.Table("metadef_properties", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'schema', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) def _check_037(self, engine, data): if engine.name == 'mysql': self.assertFalse(unique_constraint_exist('image_id', 'image_properties', engine)) self.assertTrue(unique_constraint_exist( 'ix_image_properties_image_id_name', 'image_properties', engine)) image_members = db_utils.get_table(engine, 'image_members') images = db_utils.get_table(engine, 'images') self.assertFalse(image_members.c.status.nullable) self.assertFalse(images.c.protected.nullable) now = datetime.datetime.now() temp = dict( deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='fake_image_035' ) images.insert().values(temp).execute() image = (images.select() .where(images.c.id == 'fake_image_035') .execute().fetchone()) self.assertFalse(image['protected']) temp = dict( deleted=False, created_at=now, image_id='fake_image_035', member='fake_member', can_share=True, id=3 ) 
image_members.insert().values(temp).execute() image_member = (image_members.select() .where(image_members.c.id == 3) .execute().fetchone()) self.assertEqual('pending', image_member['status']) def _post_downgrade_037(self, engine): if engine.name == 'mysql': self.assertTrue(unique_constraint_exist('image_id', 'image_properties', engine)) if engine.name == 'postgresql': self.assertTrue(index_exist('ix_image_properties_image_id_name', 'image_properties', engine)) self.assertFalse(unique_constraint_exist( 'ix_image_properties_image_id_name', 'image_properties', engine)) image_members = db_utils.get_table(engine, 'image_members') images = db_utils.get_table(engine, 'images') self.assertTrue(image_members.c.status.nullable) self.assertTrue(images.c.protected.nullable) now = datetime.datetime.now() temp = dict( deleted=False, created_at=now, status='active', is_public=True, min_disk=0, min_ram=0, id='fake_image_035_d' ) images.insert().values(temp).execute() image = (images.select() .where(images.c.id == 'fake_image_035_d') .execute().fetchone()) self.assertIsNone(image['protected']) temp = dict( deleted=False, created_at=now, image_id='fake_image_035_d', member='fake_member', can_share=True, id=4 ) image_members.insert().values(temp).execute() image_member = (image_members.select() .where(image_members.c.id == 4) .execute().fetchone()) self.assertIsNone(image_member['status']) def _pre_upgrade_038(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_tags') def _check_038(self, engine, data): meta = sqlalchemy.MetaData() meta.bind = engine # metadef_tags table = sqlalchemy.Table("metadef_tags", meta, autoload=True) expected_cols = [u'id', u'namespace_id', u'name', u'created_at', u'updated_at'] col_data = [col.name for col in table.columns] self.assertEqual(expected_cols, col_data) def _post_downgrade_038(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'metadef_tags') def 
_check_039(self, engine, data): meta = sqlalchemy.MetaData() meta.bind = engine metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta, autoload=True) metadef_properties = sqlalchemy.Table('metadef_properties', meta, autoload=True) metadef_objects = sqlalchemy.Table('metadef_objects', meta, autoload=True) metadef_ns_res_types = sqlalchemy.Table( 'metadef_namespace_resource_types', meta, autoload=True) metadef_resource_types = sqlalchemy.Table('metadef_resource_types', meta, autoload=True) tables = [metadef_namespaces, metadef_properties, metadef_objects, metadef_ns_res_types, metadef_resource_types] for table in tables: for index_name in ['ix_namespaces_namespace', 'ix_objects_namespace_id_name', 'ix_metadef_properties_namespace_id_name']: self.assertFalse(index_exist(index_name, table.name, engine)) for uc_name in ['resource_type_id', 'namespace', 'name', 'namespace_id', 'metadef_objects_namespace_id_name_key', 'metadef_properties_namespace_id_name_key']: self.assertFalse(unique_constraint_exist(uc_name, table.name, engine)) self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.name, engine)) self.assertTrue(index_exist('ix_metadef_namespaces_namespace', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_namespaces_owner', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_name', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_namespace_id', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_name', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_namespace_id', metadef_properties.name, engine)) def _post_downgrade_039(self, engine): meta = sqlalchemy.MetaData() meta.bind = engine metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta, autoload=True) metadef_properties = sqlalchemy.Table('metadef_properties', meta, autoload=True) metadef_objects = 
sqlalchemy.Table('metadef_objects', meta, autoload=True) metadef_ns_res_types = sqlalchemy.Table( 'metadef_namespace_resource_types', meta, autoload=True) metadef_resource_types = sqlalchemy.Table('metadef_resource_types', meta, autoload=True) self.assertFalse(index_exist('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.name, engine)) self.assertFalse(index_exist('ix_metadef_namespaces_namespace', metadef_namespaces.name, engine)) self.assertFalse(index_exist('ix_metadef_namespaces_owner', metadef_namespaces.name, engine)) self.assertFalse(index_exist('ix_metadef_objects_name', metadef_objects.name, engine)) self.assertFalse(index_exist('ix_metadef_objects_namespace_id', metadef_objects.name, engine)) self.assertFalse(index_exist('ix_metadef_properties_name', metadef_properties.name, engine)) self.assertFalse(index_exist('ix_metadef_properties_namespace_id', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_namespaces_namespace', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_objects_namespace_id_name', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_namespace_id_name', metadef_properties.name, engine)) if engine.name == 'postgresql': inspector = inspect(engine) self.assertEqual(1, len(inspector.get_unique_constraints( 'metadef_objects'))) self.assertEqual(1, len(inspector.get_unique_constraints( 'metadef_properties'))) if engine.name == 'mysql': self.assertTrue(unique_constraint_exist( 'namespace_id', metadef_properties.name, engine)) self.assertTrue(unique_constraint_exist( 'namespace_id', metadef_objects.name, engine)) self.assertTrue(unique_constraint_exist( 'resource_type_id', metadef_ns_res_types.name, engine)) self.assertTrue(unique_constraint_exist( 'namespace', metadef_namespaces.name, engine)) self.assertTrue(unique_constraint_exist( 'name', metadef_resource_types.name, engine)) def _check_040(self, engine, data): meta = sqlalchemy.MetaData() meta.bind = engine metadef_tags = 
sqlalchemy.Table('metadef_tags', meta, autoload=True) if engine.name == 'mysql': self.assertFalse(index_exist('namespace_id', metadef_tags.name, engine)) def _pre_upgrade_041(self, engine): self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifacts') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifact_tags') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifact_properties') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifact_blobs') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifact_dependencies') self.assertRaises(sqlalchemy.exc.NoSuchTableError, db_utils.get_table, engine, 'artifact_locations') def _check_041(self, engine, data): artifacts_indices = [('ix_artifact_name_and_version', ['name', 'version_prefix', 'version_suffix']), ('ix_artifact_type', ['type_name', 'type_version_prefix', 'type_version_suffix']), ('ix_artifact_state', ['state']), ('ix_artifact_visibility', ['visibility']), ('ix_artifact_owner', ['owner'])] artifacts_columns = ['id', 'name', 'type_name', 'type_version_prefix', 'type_version_suffix', 'type_version_meta', 'version_prefix', 'version_suffix', 'version_meta', 'description', 'visibility', 'state', 'owner', 'created_at', 'updated_at', 'deleted_at', 'published_at'] self.assert_table(engine, 'artifacts', artifacts_indices, artifacts_columns) tags_indices = [('ix_artifact_tags_artifact_id', ['artifact_id']), ('ix_artifact_tags_artifact_id_tag_value', ['artifact_id', 'value'])] tags_columns = ['id', 'artifact_id', 'value', 'created_at', 'updated_at'] self.assert_table(engine, 'artifact_tags', tags_indices, tags_columns) prop_indices = [ ('ix_artifact_properties_artifact_id', ['artifact_id']), ('ix_artifact_properties_name', ['name'])] prop_columns = ['id', 'artifact_id', 'name', 'string_value', 'int_value', 'numeric_value', 'bool_value', 'text_value', 'created_at', 
'updated_at', 'position'] self.assert_table(engine, 'artifact_properties', prop_indices, prop_columns) blobs_indices = [ ('ix_artifact_blobs_artifact_id', ['artifact_id']), ('ix_artifact_blobs_name', ['name'])] blobs_columns = ['id', 'artifact_id', 'size', 'checksum', 'name', 'item_key', 'position', 'created_at', 'updated_at'] self.assert_table(engine, 'artifact_blobs', blobs_indices, blobs_columns) dependencies_indices = [ ('ix_artifact_dependencies_source_id', ['artifact_source']), ('ix_artifact_dependencies_direct_dependencies', ['artifact_source', 'is_direct']), ('ix_artifact_dependencies_dest_id', ['artifact_dest']), ('ix_artifact_dependencies_origin_id', ['artifact_origin'])] dependencies_columns = ['id', 'artifact_source', 'artifact_dest', 'artifact_origin', 'is_direct', 'position', 'name', 'created_at', 'updated_at'] self.assert_table(engine, 'artifact_dependencies', dependencies_indices, dependencies_columns) locations_indices = [ ('ix_artifact_blob_locations_blob_id', ['blob_id'])] locations_columns = ['id', 'blob_id', 'value', 'created_at', 'updated_at', 'position', 'status'] self.assert_table(engine, 'artifact_blob_locations', locations_indices, locations_columns) def _pre_upgrade_042(self, engine): meta = sqlalchemy.MetaData() meta.bind = engine metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta, autoload=True) metadef_objects = sqlalchemy.Table('metadef_objects', meta, autoload=True) metadef_properties = sqlalchemy.Table('metadef_properties', meta, autoload=True) metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True) metadef_resource_types = sqlalchemy.Table('metadef_resource_types', meta, autoload=True) metadef_ns_res_types = sqlalchemy.Table( 'metadef_namespace_resource_types', meta, autoload=True) # These will be dropped and recreated as unique constraints. 
self.assertTrue(index_exist('ix_metadef_namespaces_namespace', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_namespace_id', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_namespace_id', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_tags_namespace_id', metadef_tags.name, engine)) self.assertTrue(index_exist('ix_metadef_resource_types_name', metadef_resource_types.name, engine)) # This one will be dropped - not needed self.assertTrue(index_exist( 'ix_metadef_ns_res_types_res_type_id_ns_id', metadef_ns_res_types.name, engine)) # The rest must remain self.assertTrue(index_exist('ix_metadef_namespaces_owner', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_name', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_name', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_tags_name', metadef_tags.name, engine)) self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.name, engine)) # To be created self.assertFalse(unique_constraint_exist ('uq_metadef_objects_namespace_id_name', metadef_objects.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_properties_namespace_id_name', metadef_properties.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_tags_namespace_id_name', metadef_tags.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_namespaces_namespace', metadef_namespaces.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_resource_types_name', metadef_resource_types.name, engine) ) def _check_042(self, engine, data): meta = sqlalchemy.MetaData() meta.bind = engine metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta, autoload=True) metadef_objects = sqlalchemy.Table('metadef_objects', meta, autoload=True) metadef_properties = sqlalchemy.Table('metadef_properties', meta, 
autoload=True) metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True) metadef_resource_types = sqlalchemy.Table('metadef_resource_types', meta, autoload=True) metadef_ns_res_types = sqlalchemy.Table( 'metadef_namespace_resource_types', meta, autoload=True) # Dropped for unique constraints self.assertFalse(index_exist('ix_metadef_namespaces_namespace', metadef_namespaces.name, engine)) self.assertFalse(index_exist('ix_metadef_objects_namespace_id', metadef_objects.name, engine)) self.assertFalse(index_exist('ix_metadef_properties_namespace_id', metadef_properties.name, engine)) self.assertFalse(index_exist('ix_metadef_tags_namespace_id', metadef_tags.name, engine)) self.assertFalse(index_exist('ix_metadef_resource_types_name', metadef_resource_types.name, engine)) # Dropped - not needed because of the existing primary key self.assertFalse(index_exist( 'ix_metadef_ns_res_types_res_type_id_ns_id', metadef_ns_res_types.name, engine)) # Still exist as before self.assertTrue(index_exist('ix_metadef_namespaces_owner', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_name', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_name', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_tags_name', metadef_tags.name, engine)) self.assertTrue(unique_constraint_exist ('uq_metadef_namespaces_namespace', metadef_namespaces.name, engine) ) self.assertTrue(unique_constraint_exist ('uq_metadef_objects_namespace_id_name', metadef_objects.name, engine) ) self.assertTrue(unique_constraint_exist ('uq_metadef_properties_namespace_id_name', metadef_properties.name, engine) ) self.assertTrue(unique_constraint_exist ('uq_metadef_tags_namespace_id_name', metadef_tags.name, engine) ) self.assertTrue(unique_constraint_exist ('uq_metadef_resource_types_name', metadef_resource_types.name, engine) 
) def _post_downgrade_042(self, engine): meta = sqlalchemy.MetaData() meta.bind = engine metadef_namespaces = sqlalchemy.Table('metadef_namespaces', meta, autoload=True) metadef_objects = sqlalchemy.Table('metadef_objects', meta, autoload=True) metadef_properties = sqlalchemy.Table('metadef_properties', meta, autoload=True) metadef_tags = sqlalchemy.Table('metadef_tags', meta, autoload=True) metadef_resource_types = sqlalchemy.Table('metadef_resource_types', meta, autoload=True) metadef_ns_res_types = sqlalchemy.Table( 'metadef_namespace_resource_types', meta, autoload=True) # These have been recreated self.assertTrue(index_exist('ix_metadef_namespaces_namespace', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_namespace_id', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_namespace_id', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_tags_namespace_id', metadef_tags.name, engine)) self.assertTrue(index_exist('ix_metadef_resource_types_name', metadef_resource_types.name, engine)) self.assertTrue(index_exist( 'ix_metadef_ns_res_types_res_type_id_ns_id', metadef_ns_res_types.name, engine)) # The rest must remain self.assertTrue(index_exist('ix_metadef_namespaces_owner', metadef_namespaces.name, engine)) self.assertTrue(index_exist('ix_metadef_objects_name', metadef_objects.name, engine)) self.assertTrue(index_exist('ix_metadef_properties_name', metadef_properties.name, engine)) self.assertTrue(index_exist('ix_metadef_tags_name', metadef_tags.name, engine)) self.assertTrue(index_exist('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.name, engine)) # Dropped self.assertFalse(unique_constraint_exist ('uq_metadef_objects_namespace_id_name', metadef_objects.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_properties_namespace_id_name', metadef_properties.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_tags_namespace_id_name', 
metadef_tags.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_namespaces_namespace', metadef_namespaces.name, engine) ) self.assertFalse(unique_constraint_exist ('uq_metadef_resource_types_name', metadef_resource_types.name, engine) ) def assert_table(self, engine, table_name, indices, columns): table = db_utils.get_table(engine, table_name) index_data = [(index.name, index.columns.keys()) for index in table.indexes] column_data = [column.name for column in table.columns] self.assertItemsEqual(columns, column_data) self.assertItemsEqual(indices, index_data) class TestMysqlMigrations(test_base.MySQLOpportunisticTestCase, MigrationsMixin): def test_mysql_innodb_tables(self): migration.db_sync(engine=self.migrate_engine) total = self.migrate_engine.execute( "SELECT COUNT(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA='%s'" % self.migrate_engine.url.database) self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") noninnodb = self.migrate_engine.execute( "SELECT count(*) " "FROM information_schema.TABLES " "WHERE TABLE_SCHEMA='%s' " "AND ENGINE!='InnoDB' " "AND TABLE_NAME!='migrate_version'" % self.migrate_engine.url.database) count = noninnodb.scalar() self.assertEqual(0, count, "%d non InnoDB tables created" % count) class TestPostgresqlMigrations(test_base.PostgreSQLOpportunisticTestCase, MigrationsMixin): pass class TestSqliteMigrations(test_base.DbTestCase, MigrationsMixin): def test_walk_versions(self): # No more downgrades self._walk_versions(False, False) class ModelsMigrationSyncMixin(object): def get_metadata(self): for table in models_metadef.BASE_DICT.metadata.sorted_tables: models.BASE.metadata._add_table(table.name, table.schema, table) for table in models_glare.BASE.metadata.sorted_tables: models.BASE.metadata._add_table(table.name, table.schema, table) return models.BASE.metadata def get_engine(self): return self.engine def db_sync(self, engine): migration.db_sync(engine=engine) # TODO(akamyshikova): remove 
this method as soon as comparison with Variant # will be implemented in oslo.db or alembic def compare_type(self, ctxt, insp_col, meta_col, insp_type, meta_type): if isinstance(meta_type, types.Variant): meta_orig_type = meta_col.type insp_orig_type = insp_col.type meta_col.type = meta_type.impl insp_col.type = meta_type.impl try: return self.compare_type(ctxt, insp_col, meta_col, insp_type, meta_type.impl) finally: meta_col.type = meta_orig_type insp_col.type = insp_orig_type else: ret = super(ModelsMigrationSyncMixin, self).compare_type( ctxt, insp_col, meta_col, insp_type, meta_type) if ret is not None: return ret return ctxt.impl.compare_type(insp_col, meta_col) def include_object(self, object_, name, type_, reflected, compare_to): if name in ['migrate_version'] and type_ == 'table': return False return True class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.MySQLOpportunisticTestCase): pass class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.PostgreSQLOpportunisticTestCase): pass class ModelsMigrationsSyncSQLite(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.DbTestCase): pass glance-12.0.0/glance/tests/unit/test_cache_middleware.py0000775000567000056710000007604412701407047024427 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import testtools import webob import glance.api.middleware.cache import glance.api.policy from glance.common import exception from glance import context import glance.registry.client.v1.api as registry from glance.tests.unit import base from glance.tests.unit import utils as unit_test_utils class ImageStub(object): def __init__(self, image_id, extra_properties=None, visibility='private'): if extra_properties is None: extra_properties = {} self.image_id = image_id self.visibility = visibility self.status = 'active' self.extra_properties = extra_properties self.checksum = 'c1234' self.size = 123456789 class TestCacheMiddlewareURLMatching(testtools.TestCase): def test_v1_no_match_detail(self): req = webob.Request.blank('/v1/images/detail') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) def test_v1_no_match_detail_with_query_params(self): req = webob.Request.blank('/v1/images/detail?limit=10') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) def test_v1_match_id_with_query_param(self): req = webob.Request.blank('/v1/images/asdf?ping=pong') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertEqual(('v1', 'GET', 'asdf'), out) def test_v2_match_id(self): req = webob.Request.blank('/v2/images/asdf/file') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertEqual(('v2', 'GET', 'asdf'), out) def test_v2_no_match_bad_path(self): req = webob.Request.blank('/v2/images/asdf') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) def test_no_match_unknown_version(self): req = webob.Request.blank('/v3/images/asdf') out = glance.api.middleware.cache.CacheFilter._match_request(req) self.assertIsNone(out) class TestCacheMiddlewareRequestStashCacheInfo(testtools.TestCase): def setUp(self): 
super(TestCacheMiddlewareRequestStashCacheInfo, self).setUp() self.request = webob.Request.blank('') self.middleware = glance.api.middleware.cache.CacheFilter def test_stash_cache_request_info(self): self.middleware._stash_request_info(self.request, 'asdf', 'GET', 'v2') self.assertEqual('asdf', self.request.environ['api.cache.image_id']) self.assertEqual('GET', self.request.environ['api.cache.method']) self.assertEqual('v2', self.request.environ['api.cache.version']) def test_fetch_cache_request_info(self): self.request.environ['api.cache.image_id'] = 'asdf' self.request.environ['api.cache.method'] = 'GET' self.request.environ['api.cache.version'] = 'v2' (image_id, method, version) = self.middleware._fetch_request_info( self.request) self.assertEqual('asdf', image_id) self.assertEqual('GET', method) self.assertEqual('v2', version) def test_fetch_cache_request_info_unset(self): out = self.middleware._fetch_request_info(self.request) self.assertIsNone(out) class ChecksumTestCacheFilter(glance.api.middleware.cache.CacheFilter): def __init__(self): class DummyCache(object): def get_caching_iter(self, image_id, image_checksum, app_iter): self.image_checksum = image_checksum self.cache = DummyCache() self.policy = unit_test_utils.FakePolicyEnforcer() class TestCacheMiddlewareChecksumVerification(base.IsolatedUnitTest): def setUp(self): super(TestCacheMiddlewareChecksumVerification, self).setUp() self.context = context.RequestContext(is_admin=True) self.request = webob.Request.blank('') self.request.context = self.context def test_checksum_v1_header(self): cache_filter = ChecksumTestCacheFilter() headers = {"x-image-meta-checksum": "1234567890"} resp = webob.Response(request=self.request, headers=headers) cache_filter._process_GET_response(resp, None) self.assertEqual("1234567890", cache_filter.cache.image_checksum) def test_checksum_v2_header(self): cache_filter = ChecksumTestCacheFilter() headers = { "x-image-meta-checksum": "1234567890", "Content-MD5": "abcdefghi" } 
resp = webob.Response(request=self.request, headers=headers) cache_filter._process_GET_response(resp, None) self.assertEqual("abcdefghi", cache_filter.cache.image_checksum) def test_checksum_missing_header(self): cache_filter = ChecksumTestCacheFilter() resp = webob.Response(request=self.request) cache_filter._process_GET_response(resp, None) self.assertIsNone(cache_filter.cache.image_checksum) class FakeImageSerializer(object): def show(self, response, raw_response): return True class ProcessRequestTestCacheFilter(glance.api.middleware.cache.CacheFilter): def __init__(self): self.serializer = FakeImageSerializer() class DummyCache(object): def __init__(self): self.deleted_images = [] def is_cached(self, image_id): return True def get_caching_iter(self, image_id, image_checksum, app_iter): pass def delete_cached_image(self, image_id): self.deleted_images.append(image_id) def get_image_size(self, image_id): pass self.cache = DummyCache() self.policy = unit_test_utils.FakePolicyEnforcer() class TestCacheMiddlewareProcessRequest(base.IsolatedUnitTest): def _enforcer_from_rules(self, unparsed_rules): rules = policy.Rules.from_dict(unparsed_rules) enforcer = glance.api.policy.Enforcer() enforcer.set_rules(rules, overwrite=True) return enforcer def test_v1_deleted_image_fetch(self): """ Test for determining that when an admin tries to download a deleted image it returns 404 Not Found error. 
""" def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'deleted', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': True, 'updated_at': '', 'properties': {}, } request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() self.assertRaises(exception.NotFound, cache_filter._process_v1_request, request, image_id, dummy_img_iterator, image_meta) def test_process_v1_request_for_deleted_but_cached_image(self): """ Test for determining image is deleted from cache when it is not found in Glance Registry. """ def fake_process_v1_request(request, image_id, image_iterator, image_meta): raise exception.ImageNotFound() def fake_get_v1_image_metadata(request, image_id): return {'status': 'active', 'properties': {}} image_id = 'test1' request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() self.stubs.Set(cache_filter, '_get_v1_image_metadata', fake_get_v1_image_metadata) self.stubs.Set(cache_filter, '_process_v1_request', fake_process_v1_request) cache_filter.process_request(request) self.assertIn(image_id, cache_filter.cache.deleted_images) def test_v1_process_request_image_fetch(self): def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 
'properties': {}, } request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() actual = cache_filter._process_v1_request( request, image_id, dummy_img_iterator, image_meta) self.assertTrue(actual) def test_v1_remove_location_image_fetch(self): class CheckNoLocationDataSerializer(object): def show(self, response, raw_response): return 'location_data' in raw_response['image_meta'] def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'properties': {}, } request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() cache_filter.serializer = CheckNoLocationDataSerializer() actual = cache_filter._process_v1_request( request, image_id, dummy_img_iterator, image_meta) self.assertFalse(actual) def test_verify_metadata_deleted_image(self): """ Test verify_metadata raises exception.NotFound for a deleted image """ image_meta = {'status': 'deleted', 'is_public': True, 'deleted': True} cache_filter = ProcessRequestTestCacheFilter() self.assertRaises(exception.NotFound, cache_filter._verify_metadata, image_meta) def test_verify_metadata_zero_size(self): """ Test verify_metadata updates metadata with cached image size for images with 0 size """ image_size = 1 def fake_get_image_size(image_id): return image_size image_id = 'test1' image_meta = {'size': 0, 'deleted': False, 'id': image_id, 'status': 'active'} cache_filter = ProcessRequestTestCacheFilter() self.stubs.Set(cache_filter.cache, 'get_image_size', fake_get_image_size) 
cache_filter._verify_metadata(image_meta) self.assertEqual(image_size, image_meta['size']) def test_v2_process_request_response_headers(self): def dummy_img_iterator(): for i in range(3): yield i image_id = 'test1' request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext() request.environ['api.cache.image'] = ImageStub(image_id) image_meta = { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'properties': {}, } cache_filter = ProcessRequestTestCacheFilter() response = cache_filter._process_v2_request( request, image_id, dummy_img_iterator, image_meta) self.assertEqual('application/octet-stream', response.headers['Content-Type']) self.assertEqual('c1234', response.headers['Content-MD5']) self.assertEqual('123456789', response.headers['Content-Length']) def test_process_request_without_download_image_policy(self): """ Test for cache middleware skip processing when request context has not 'download_image' role. """ def fake_get_v1_image_metadata(*args, **kwargs): return {'status': 'active', 'properties': {}} image_id = 'test1' request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata enforcer = self._enforcer_from_rules({'download_image': '!'}) cache_filter.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_request, request) def test_v1_process_request_download_restricted(self): """ Test process_request for v1 api where _member_ role not able to download the image with custom property. 
""" image_id = 'test1' def fake_get_v1_image_metadata(*args, **kwargs): return { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'x_test_key': 'test_1234' } enforcer = self._enforcer_from_rules({ "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" }) request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['_member_']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata cache_filter.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_request, request) def test_v1_process_request_download_permitted(self): """ Test process_request for v1 api where member role able to download the image with custom property. 
""" image_id = 'test1' def fake_get_v1_image_metadata(*args, **kwargs): return { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'x_test_key': 'test_1234' } request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['member']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() actual = cache_filter.process_request(request) self.assertTrue(actual) def test_v1_process_request_image_meta_not_found(self): """ Test process_request for v1 api where registry raises NotFound exception as image metadata not found. """ image_id = 'test1' def fake_get_v1_image_metadata(*args, **kwargs): raise exception.NotFound() request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['_member_']) cache_filter = ProcessRequestTestCacheFilter() self.stubs.Set(registry, 'get_image_metadata', fake_get_v1_image_metadata) rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() self.assertRaises(webob.exc.HTTPNotFound, cache_filter.process_request, request) def test_v2_process_request_download_restricted(self): """ Test process_request for v2 api where _member_ role not able to download the image with custom property. 
""" image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return glance.api.policy.ImageTarget(image) enforcer = self._enforcer_from_rules({ "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" }) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['_member_']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata cache_filter.policy = enforcer self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_request, request) def test_v2_process_request_download_permitted(self): """ Test process_request for v2 api where member role able to download the image with custom property. """ image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return glance.api.policy.ImageTarget(image) request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['member']) cache_filter = ProcessRequestTestCacheFilter() cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() actual = cache_filter.process_request(request) self.assertTrue(actual) class TestCacheMiddlewareProcessResponse(base.IsolatedUnitTest): def test_process_v1_DELETE_response(self): image_id = 'test1' request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() cache_filter = ProcessRequestTestCacheFilter() headers = 
{"x-image-meta-deleted": True} resp = webob.Response(request=request, headers=headers) actual = cache_filter._process_DELETE_response(resp, image_id) self.assertEqual(resp, actual) def test_get_status_code(self): headers = {"x-image-meta-deleted": True} resp = webob.Response(headers=headers) cache_filter = ProcessRequestTestCacheFilter() actual = cache_filter.get_status_code(resp) self.assertEqual(200, actual) def test_process_response(self): def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v1') def fake_get_v1_image_metadata(*args, **kwargs): return {'properties': {}} cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata image_id = 'test1' request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() headers = {"x-image-meta-deleted": True} resp = webob.Response(request=request, headers=headers) actual = cache_filter.process_response(resp) self.assertEqual(resp, actual) def test_process_response_without_download_image_policy(self): """ Test for cache middleware raise webob.exc.HTTPForbidden directly when request context has not 'download_image' role. 
""" def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v1') def fake_get_v1_image_metadata(*args, **kwargs): return {'properties': {}} cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata rules = {'download_image': '!'} self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() image_id = 'test1' request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext() resp = webob.Response(request=request) self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_response, resp) self.assertEqual([b''], resp.app_iter) def test_v1_process_response_download_restricted(self): """ Test process_response for v1 api where _member_ role not able to download the image with custom property. """ image_id = 'test1' def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v1') def fake_get_v1_image_metadata(*args, **kwargs): return { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'x_test_key': 'test_1234' } cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['_member_']) resp = webob.Response(request=request) self.assertRaises(webob.exc.HTTPForbidden, 
cache_filter.process_response, resp) def test_v1_process_response_download_permitted(self): """ Test process_response for v1 api where member role able to download the image with custom property. """ image_id = 'test1' def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v1') def fake_get_v1_image_metadata(*args, **kwargs): return { 'id': image_id, 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': False, 'updated_at': '', 'x_test_key': 'test_1234' } cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v1_image_metadata = fake_get_v1_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['member']) resp = webob.Response(request=request) actual = cache_filter.process_response(resp) self.assertEqual(resp, actual) def test_v1_process_response_image_meta_not_found(self): """ Test process_response for v1 api where registry raises NotFound exception as image metadata not found. 
""" image_id = 'test1' def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v1') def fake_get_v1_image_metadata(*args, **kwargs): raise exception.NotFound() cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info self.stubs.Set(registry, 'get_image_metadata', fake_get_v1_image_metadata) rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() request = webob.Request.blank('/v1/images/%s' % image_id) request.context = context.RequestContext(roles=['_member_']) resp = webob.Response(request=request) self.assertRaises(webob.exc.HTTPNotFound, cache_filter.process_response, resp) def test_v2_process_response_download_restricted(self): """ Test process_response for v2 api where _member_ role not able to download the image with custom property. """ image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v2') def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return glance.api.policy.ImageTarget(image) cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['_member_']) resp = webob.Response(request=request) self.assertRaises(webob.exc.HTTPForbidden, cache_filter.process_response, resp) def test_v2_process_response_download_permitted(self): """ Test 
process_response for v2 api where member role able to download the image with custom property. """ image_id = 'test1' extra_properties = { 'x_test_key': 'test_1234' } def fake_fetch_request_info(*args, **kwargs): return ('test1', 'GET', 'v2') def fake_get_v2_image_metadata(*args, **kwargs): image = ImageStub(image_id, extra_properties=extra_properties) request.environ['api.cache.image'] = image return glance.api.policy.ImageTarget(image) cache_filter = ProcessRequestTestCacheFilter() cache_filter._fetch_request_info = fake_fetch_request_info cache_filter._get_v2_image_metadata = fake_get_v2_image_metadata rules = { "restricted": "not ('test_1234':%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) cache_filter.policy = glance.api.policy.Enforcer() request = webob.Request.blank('/v2/images/test1/file') request.context = context.RequestContext(roles=['member']) resp = webob.Response(request=request) actual = cache_filter.process_response(resp) self.assertEqual(resp, actual) glance-12.0.0/glance/tests/unit/test_store_location.py0000664000567000056710000000620012701407047024173 0ustar jenkinsjenkins00000000000000# Copyright 2011-2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance_store import mock from glance.common import exception from glance.common import store_utils import glance.location from glance.tests.unit import base CONF = {'default_store': 'file', 'swift_store_auth_address': 'localhost:8080', 'swift_store_container': 'glance', 'swift_store_user': 'user', 'swift_store_key': 'key', 'default_swift_reference': 'store_1' } class TestStoreLocation(base.StoreClearingUnitTest): class FakeImageProxy(object): size = None context = None store_api = mock.Mock() store_utils = store_utils def test_add_location_for_image_without_size(self): def fake_get_size_from_backend(uri, context=None): return 1 self.stubs.Set(glance_store, 'get_size_from_backend', fake_get_size_from_backend) with mock.patch('glance.location._check_image_location'): loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} loc2 = {'url': 'file:///fake2.img.tar.gz', 'metadata': {}} # Test for insert location image1 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image1, []) locations.insert(0, loc2) self.assertEqual(1, image1.size) # Test for set_attr of _locations_proxy image2 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image2, [loc1]) locations[0] = loc2 self.assertIn(loc2, locations) self.assertEqual(1, image2.size) def test_add_location_with_restricted_sources(self): loc1 = {'url': 'file:///fake1.img.tar.gz', 'metadata': {}} loc2 = {'url': 'swift+config:///xxx', 'metadata': {}} loc3 = {'url': 'filesystem:///foo.img.tar.gz', 'metadata': {}} # Test for insert location image1 = TestStoreLocation.FakeImageProxy() locations = glance.location.StoreLocations(image1, []) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc1) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc3) self.assertNotIn(loc1, locations) self.assertNotIn(loc3, locations) # Test for set_attr of _locations_proxy image2 = TestStoreLocation.FakeImageProxy() locations = 
glance.location.StoreLocations(image2, [loc1]) self.assertRaises(exception.BadStoreUri, locations.insert, 0, loc2) self.assertNotIn(loc2, locations) glance-12.0.0/glance/tests/unit/test_context_middleware.py0000664000567000056710000001511612701407047025036 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from glance.api.middleware import context import glance.context from glance.tests.unit import base class TestContextMiddleware(base.IsolatedUnitTest): def _build_request(self, roles=None, identity_status='Confirmed', service_catalog=None): req = webob.Request.blank('/') req.headers['x-auth-token'] = 'token1' req.headers['x-identity-status'] = identity_status req.headers['x-user-id'] = 'user1' req.headers['x-tenant-id'] = 'tenant1' _roles = roles or ['role1', 'role2'] req.headers['x-roles'] = ','.join(_roles) if service_catalog: req.headers['x-service-catalog'] = service_catalog return req def _build_middleware(self): return context.ContextMiddleware(None) def test_header_parsing(self): req = self._build_request() self._build_middleware().process_request(req) self.assertEqual('token1', req.context.auth_token) self.assertEqual('user1', req.context.user) self.assertEqual('tenant1', req.context.tenant) self.assertEqual(['role1', 'role2'], req.context.roles) def test_is_admin_flag(self): # is_admin check should look for 'admin' role by default req = self._build_request(roles=['admin', 'role2']) 
self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # without the 'admin' role, is_admin should be False req = self._build_request() self._build_middleware().process_request(req) self.assertFalse(req.context.is_admin) # if we change the admin_role attribute, we should be able to use it req = self._build_request() self.config(admin_role='role1') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_case_insensitive(self): # accept role from request req = self._build_request(roles=['Admin', 'role2']) self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # accept role from config req = self._build_request(roles=['role1']) self.config(admin_role='rOLe1') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_roles_stripping(self): # stripping extra spaces in request req = self._build_request(roles=['\trole1']) self.config(admin_role='role1') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) # stripping extra spaces in config req = self._build_request(roles=['\trole1\n']) self.config(admin_role=' role1\t') self._build_middleware().process_request(req) self.assertTrue(req.context.is_admin) def test_anonymous_access_enabled(self): req = self._build_request(identity_status='Nope') self.config(allow_anonymous_access=True) middleware = self._build_middleware() middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user) self.assertIsNone(req.context.tenant) self.assertEqual([], req.context.roles) self.assertFalse(req.context.is_admin) self.assertTrue(req.context.read_only) def test_anonymous_access_defaults_to_disabled(self): req = self._build_request(identity_status='Nope') middleware = self._build_middleware() self.assertRaises(webob.exc.HTTPUnauthorized, middleware.process_request, req) def test_service_catalog(self): catalog_json = "[{}]" req = 
self._build_request(service_catalog=catalog_json) self._build_middleware().process_request(req) self.assertEqual([{}], req.context.service_catalog) def test_invalid_service_catalog(self): catalog_json = "bad json" req = self._build_request(service_catalog=catalog_json) middleware = self._build_middleware() self.assertRaises(webob.exc.HTTPInternalServerError, middleware.process_request, req) def test_response(self): req = self._build_request() req.context = glance.context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req self._build_middleware().process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] # Validate that request-id do not starts with 'req-req-' if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') self.assertFalse(resp_req_id.startswith('req-req-')) self.assertTrue(resp_req_id.startswith('req-')) class TestUnauthenticatedContextMiddleware(base.IsolatedUnitTest): def test_request(self): middleware = context.UnauthenticatedContextMiddleware(None) req = webob.Request.blank('/') middleware.process_request(req) self.assertIsNone(req.context.auth_token) self.assertIsNone(req.context.user) self.assertIsNone(req.context.tenant) self.assertEqual([], req.context.roles) self.assertTrue(req.context.is_admin) def test_response(self): middleware = context.UnauthenticatedContextMiddleware(None) req = webob.Request.blank('/') req.context = glance.context.RequestContext() request_id = req.context.request_id resp = webob.Response() resp.request = req middleware.process_response(resp) self.assertEqual(request_id, resp.headers['x-openstack-request-id']) resp_req_id = resp.headers['x-openstack-request-id'] if isinstance(resp_req_id, bytes): resp_req_id = resp_req_id.decode('utf-8') # Validate that request-id do not starts with 'req-req-' self.assertFalse(resp_req_id.startswith('req-req-')) 
self.assertTrue(resp_req_id.startswith('req-')) glance-12.0.0/glance/tests/unit/test_notifier.py0000664000567000056710000006075012701407047023000 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import glance_store import mock from oslo_config import cfg import oslo_messaging import webob import glance.async from glance.common import exception from glance.common import timeutils import glance.context from glance import notifier import glance.tests.unit.utils as unit_test_utils from glance.tests import utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' class ImageStub(glance.domain.Image): def get_data(self, offset=0, chunk_size=None): return ['01234', '56789'] def set_data(self, data, size=None): for chunk in data: pass class ImageRepoStub(object): def remove(self, *args, **kwargs): return 'image_from_get' def save(self, *args, **kwargs): return 'image_from_save' def add(self, *args, **kwargs): return 'image_from_add' def get(self, *args, **kwargs): return 'image_from_get' def list(self, *args, **kwargs): return ['images_from_list'] class ImageMemberRepoStub(object): def remove(self, *args, **kwargs): return 'image_member_from_remove' def 
save(self, *args, **kwargs): return 'image_member_from_save' def add(self, *args, **kwargs): return 'image_member_from_add' def get(self, *args, **kwargs): return 'image_member_from_get' def list(self, *args, **kwargs): return ['image_members_from_list'] class TaskStub(glance.domain.TaskStub): def run(self, executor): pass class Task(glance.domain.Task): def succeed(self, result): pass def fail(self, message): pass class TaskRepoStub(object): def remove(self, *args, **kwargs): return 'task_from_remove' def save(self, *args, **kwargs): return 'task_from_save' def add(self, *args, **kwargs): return 'task_from_add' def get_task(self, *args, **kwargs): return 'task_from_get' def list(self, *args, **kwargs): return ['tasks_from_list'] class TestNotifier(utils.BaseTestCase): @mock.patch.object(oslo_messaging, 'Notifier') @mock.patch.object(oslo_messaging, 'get_transport') def _test_load_strategy(self, mock_get_transport, mock_notifier, url, driver): nfier = notifier.Notifier() mock_get_transport.assert_called_with(cfg.CONF, aliases=notifier._ALIASES) self.assertIsNotNone(nfier._transport) mock_notifier.assert_called_with(nfier._transport, publisher_id='image.localhost') self.assertIsNotNone(nfier._notifier) def test_notifier_load(self): self._test_load_strategy(url=None, driver=None) @mock.patch.object(oslo_messaging, 'set_transport_defaults') def test_set_defaults(self, mock_set_trans_defaults): notifier.set_defaults(control_exchange='foo') mock_set_trans_defaults.assert_called_with('foo') notifier.set_defaults() mock_set_trans_defaults.assert_called_with('glance') class TestImageNotifications(utils.BaseTestCase): """Test Image Notifications work""" def setUp(self): super(TestImageNotifications, self).setUp() self.image = ImageStub( image_id=UUID1, name='image-1', status='active', size=1024, created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, visibility='public', container_format='ami', virtual_size=2048, tags=['one', 'two'], disk_format='ami', min_ram=128, 
min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', locations=['http://127.0.0.1']) self.context = glance.context.RequestContext(tenant=TENANT2, user=USER1) self.image_repo_stub = ImageRepoStub() self.notifier = unit_test_utils.FakeNotifier() self.image_repo_proxy = glance.notifier.ImageRepoProxy( self.image_repo_stub, self.context, self.notifier) self.image_proxy = glance.notifier.ImageProxy( self.image, self.context, self.notifier) def test_image_save_notification(self): self.image_repo_proxy.save(self.image_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.update', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) if 'location' in output_log['payload']: self.fail('Notification contained location field.') def test_image_add_notification(self): self.image_repo_proxy.add(self.image_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) if 'location' in output_log['payload']: self.fail('Notification contained location field.') def test_image_delete_notification(self): self.image_repo_proxy.remove(self.image_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.delete', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) self.assertTrue(output_log['payload']['deleted']) if 'location' in output_log['payload']: self.fail('Notification contained location field.') def test_image_get(self): image = self.image_repo_proxy.get(UUID1) self.assertIsInstance(image, 
glance.notifier.ImageProxy) self.assertEqual('image_from_get', image.repo) def test_image_list(self): images = self.image_repo_proxy.list() self.assertIsInstance(images[0], glance.notifier.ImageProxy) self.assertEqual('images_from_list', images[0].repo) def test_image_get_data_should_call_next_image_get_data(self): with mock.patch.object(self.image, 'get_data') as get_data_mock: self.image_proxy.get_data() self.assertTrue(get_data_mock.called) def test_image_get_data_notification(self): self.image_proxy.size = 10 data = ''.join(self.image_proxy.get_data()) self.assertEqual('0123456789', data) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.send', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['image_id']) self.assertEqual(TENANT2, output_log['payload']['receiver_tenant_id']) self.assertEqual(USER1, output_log['payload']['receiver_user_id']) self.assertEqual(10, output_log['payload']['bytes_sent']) self.assertEqual(TENANT1, output_log['payload']['owner_id']) def test_image_get_data_size_mismatch(self): self.image_proxy.size = 11 list(self.image_proxy.get_data()) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.send', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['image_id']) def test_image_set_data_prepare_notification(self): insurance = {'called': False} def data_iterator(): output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.prepare', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) yield 'abcd' yield 'efgh' insurance['called'] = True 
self.image_proxy.set_data(data_iterator(), 8) self.assertTrue(insurance['called']) def test_image_set_data_upload_and_activate_notification(self): def data_iterator(): self.notifier.log = [] yield 'abcde' yield 'fghij' self.image_proxy.set_data(data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(2, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) output_log = output_logs[1] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.activate', output_log['event_type']) self.assertEqual(self.image.image_id, output_log['payload']['id']) def test_image_set_data_storage_full(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise glance_store.StorageFull(message='Modern Major General') self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Modern Major General', output_log['payload']) def test_image_set_data_value_error(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise ValueError('value wrong') self.assertRaises(webob.exc.HTTPBadRequest, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('value wrong', output_log['payload']) def test_image_set_data_duplicate(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise exception.Duplicate('Cant have duplicates') 
self.assertRaises(webob.exc.HTTPConflict, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Cant have duplicates', output_log['payload']) def test_image_set_data_storage_write_denied(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise glance_store.StorageWriteDenied(message='The Very Model') self.assertRaises(webob.exc.HTTPServiceUnavailable, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('The Very Model', output_log['payload']) def test_image_set_data_forbidden(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise exception.Forbidden('Not allowed') self.assertRaises(webob.exc.HTTPForbidden, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Not allowed', output_log['payload']) def test_image_set_data_not_found(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise exception.NotFound('Not found') self.assertRaises(webob.exc.HTTPNotFound, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Not found', output_log['payload']) def test_image_set_data_HTTP_error(self): def data_iterator(): 
self.notifier.log = [] yield 'abcde' raise webob.exc.HTTPError('Http issue') self.assertRaises(webob.exc.HTTPError, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Http issue', output_log['payload']) def test_image_set_data_error(self): def data_iterator(): self.notifier.log = [] yield 'abcde' raise exception.GlanceException('Failed') self.assertRaises(exception.GlanceException, self.image_proxy.set_data, data_iterator(), 10) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('ERROR', output_log['notification_type']) self.assertEqual('image.upload', output_log['event_type']) self.assertIn('Failed', output_log['payload']) class TestImageMemberNotifications(utils.BaseTestCase): """Test Image Member Notifications work""" def setUp(self): super(TestImageMemberNotifications, self).setUp() self.context = glance.context.RequestContext(tenant=TENANT2, user=USER1) self.notifier = unit_test_utils.FakeNotifier() self.image = ImageStub( image_id=UUID1, name='image-1', status='active', size=1024, created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, visibility='public', container_format='ami', tags=['one', 'two'], disk_format='ami', min_ram=128, min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91', locations=['http://127.0.0.1']) self.image_member = glance.domain.ImageMembership( id=1, image_id=UUID1, member_id=TENANT1, created_at=DATETIME, updated_at=DATETIME, status='accepted') self.image_member_repo_stub = ImageMemberRepoStub() self.image_member_repo_proxy = glance.notifier.ImageMemberRepoProxy( self.image_member_repo_stub, self.image, self.context, self.notifier) self.image_member_proxy = glance.notifier.ImageMemberProxy( self.image_member, self.context, 
self.notifier) def _assert_image_member_with_notifier(self, output_log, deleted=False): self.assertEqual(self.image_member.member_id, output_log['payload']['member_id']) self.assertEqual(self.image_member.image_id, output_log['payload']['image_id']) self.assertEqual(self.image_member.status, output_log['payload']['status']) self.assertEqual(timeutils.isotime(self.image_member.created_at), output_log['payload']['created_at']) self.assertEqual(timeutils.isotime(self.image_member.updated_at), output_log['payload']['updated_at']) if deleted: self.assertTrue(output_log['payload']['deleted']) self.assertIsNotNone(output_log['payload']['deleted_at']) else: self.assertFalse(output_log['payload']['deleted']) self.assertIsNone(output_log['payload']['deleted_at']) def test_image_member_add_notification(self): self.image_member_repo_proxy.add(self.image_member_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.member.create', output_log['event_type']) self._assert_image_member_with_notifier(output_log) def test_image_member_save_notification(self): self.image_member_repo_proxy.save(self.image_member_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.member.update', output_log['event_type']) self._assert_image_member_with_notifier(output_log) def test_image_member_delete_notification(self): self.image_member_repo_proxy.remove(self.image_member_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.member.delete', output_log['event_type']) self._assert_image_member_with_notifier(output_log, deleted=True) def test_image_member_get(self): image_member = 
self.image_member_repo_proxy.get(TENANT1) self.assertIsInstance(image_member, glance.notifier.ImageMemberProxy) self.assertEqual('image_member_from_get', image_member.repo) def test_image_member_list(self): image_members = self.image_member_repo_proxy.list() self.assertIsInstance(image_members[0], glance.notifier.ImageMemberProxy) self.assertEqual('image_members_from_list', image_members[0].repo) class TestTaskNotifications(utils.BaseTestCase): """Test Task Notifications work""" def setUp(self): super(TestTaskNotifications, self).setUp() task_input = {"loc": "fake"} self.task_stub = TaskStub( task_id='aaa', task_type='import', status='pending', owner=TENANT2, expires_at=None, created_at=DATETIME, updated_at=DATETIME, ) self.task = Task( task_id='aaa', task_type='import', status='pending', owner=TENANT2, expires_at=None, created_at=DATETIME, updated_at=DATETIME, task_input=task_input, result='res', message='blah' ) self.context = glance.context.RequestContext( tenant=TENANT2, user=USER1 ) self.task_repo_stub = TaskRepoStub() self.notifier = unit_test_utils.FakeNotifier() self.task_repo_proxy = glance.notifier.TaskRepoProxy( self.task_repo_stub, self.context, self.notifier ) self.task_proxy = glance.notifier.TaskProxy( self.task, self.context, self.notifier ) self.task_stub_proxy = glance.notifier.TaskStubProxy( self.task_stub, self.context, self.notifier ) self.patcher = mock.patch.object(timeutils, 'utcnow') mock_utcnow = self.patcher.start() mock_utcnow.return_value = datetime.datetime.utcnow() def tearDown(self): super(TestTaskNotifications, self).tearDown() self.patcher.stop() def test_task_create_notification(self): self.task_repo_proxy.add(self.task_stub_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.create', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) self.assertEqual( 
timeutils.isotime(self.task.updated_at), output_log['payload']['updated_at'] ) self.assertEqual( timeutils.isotime(self.task.created_at), output_log['payload']['created_at'] ) if 'location' in output_log['payload']: self.fail('Notification contained location field.') def test_task_delete_notification(self): now = timeutils.isotime() self.task_repo_proxy.remove(self.task_stub_proxy) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.delete', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) self.assertEqual( timeutils.isotime(self.task.updated_at), output_log['payload']['updated_at'] ) self.assertEqual( timeutils.isotime(self.task.created_at), output_log['payload']['created_at'] ) self.assertEqual( now, output_log['payload']['deleted_at'] ) if 'location' in output_log['payload']: self.fail('Notification contained location field.') def test_task_run_notification(self): with mock.patch('glance.async.TaskExecutor') as mock_executor: executor = mock_executor.return_value executor._run.return_value = mock.Mock() self.task_proxy.run(executor=mock_executor) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.run', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) def test_task_processing_notification(self): self.task_proxy.begin_processing() output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.processing', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) def test_task_success_notification(self): self.task_proxy.begin_processing() 
self.task_proxy.succeed(result=None) output_logs = self.notifier.get_logs() self.assertEqual(2, len(output_logs)) output_log = output_logs[1] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.success', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) def test_task_failure_notification(self): self.task_proxy.fail(message=None) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.failure', output_log['event_type']) self.assertEqual(self.task.task_id, output_log['payload']['id']) glance-12.0.0/glance/tests/unit/test_image_cache_client.py0000664000567000056710000001131512701407047024715 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import mock from glance.common import exception from glance.image_cache import client from glance.tests import utils class CacheClientTestCase(utils.BaseTestCase): def setUp(self): super(CacheClientTestCase, self).setUp() self.client = client.CacheClient('test_host') self.client.do_request = mock.Mock() def test_delete_cached_image(self): self.client.do_request.return_value = utils.FakeHTTPResponse() self.assertTrue(self.client.delete_cached_image('test_id')) self.client.do_request.assert_called_with("DELETE", "/cached_images/test_id") def test_get_cached_images(self): expected_data = b'{"cached_images": "some_images"}' self.client.do_request.return_value = utils.FakeHTTPResponse( data=expected_data) self.assertEqual("some_images", self.client.get_cached_images()) self.client.do_request.assert_called_with("GET", "/cached_images") def test_get_queued_images(self): expected_data = b'{"queued_images": "some_images"}' self.client.do_request.return_value = utils.FakeHTTPResponse( data=expected_data) self.assertEqual("some_images", self.client.get_queued_images()) self.client.do_request.assert_called_with("GET", "/queued_images") def test_delete_all_cached_images(self): expected_data = b'{"num_deleted": 4}' self.client.do_request.return_value = utils.FakeHTTPResponse( data=expected_data) self.assertEqual(4, self.client.delete_all_cached_images()) self.client.do_request.assert_called_with("DELETE", "/cached_images") def test_queue_image_for_caching(self): self.client.do_request.return_value = utils.FakeHTTPResponse() self.assertTrue(self.client.queue_image_for_caching('test_id')) self.client.do_request.assert_called_with("PUT", "/queued_images/test_id") def test_delete_queued_image(self): self.client.do_request.return_value = utils.FakeHTTPResponse() self.assertTrue(self.client.delete_queued_image('test_id')) self.client.do_request.assert_called_with("DELETE", "/queued_images/test_id") def test_delete_all_queued_images(self): expected_data = b'{"num_deleted": 
4}' self.client.do_request.return_value = utils.FakeHTTPResponse( data=expected_data) self.assertEqual(4, self.client.delete_all_queued_images()) self.client.do_request.assert_called_with("DELETE", "/queued_images") class GetClientTestCase(utils.BaseTestCase): def setUp(self): super(GetClientTestCase, self).setUp() self.host = 'test_host' self.env = os.environ.copy() os.environ.clear() def tearDown(self): os.environ = self.env super(GetClientTestCase, self).tearDown() def test_get_client_host_only(self): expected_creds = { 'username': None, 'password': None, 'tenant': None, 'auth_url': None, 'strategy': 'noauth', 'region': None } self.assertEqual(expected_creds, client.get_client(self.host).creds) def test_get_client_all_creds(self): expected_creds = { 'username': 'name', 'password': 'pass', 'tenant': 'ten', 'auth_url': 'url', 'strategy': 'keystone', 'region': 'reg' } creds = client.get_client( self.host, username='name', password='pass', tenant='ten', auth_url='url', auth_strategy='strategy', region='reg' ).creds self.assertEqual(expected_creds, creds) def test_get_client_client_configuration_error(self): self.assertRaises(exception.ClientConfigurationError, client.get_client, self.host, username='name', password='pass', tenant='ten', auth_strategy='keystone', region='reg') glance-12.0.0/glance/tests/unit/base.py0000664000567000056710000000540212701407047021025 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import glance_store as store from glance_store import location from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import options from oslo_serialization import jsonutils from glance.tests import stubs from glance.tests import utils as test_utils CONF = cfg.CONF class StoreClearingUnitTest(test_utils.BaseTestCase): def setUp(self): super(StoreClearingUnitTest, self).setUp() # Ensure stores + locations cleared location.SCHEME_TO_CLS_MAP = {} self._create_stores() self.addCleanup(setattr, location, 'SCHEME_TO_CLS_MAP', dict()) def _create_stores(self, passing_config=True): """Create known stores. Mock out sheepdog's subprocess dependency on collie. :param passing_config: making store driver passes basic configurations. :returns: the number of how many store drivers been loaded. """ store.register_opts(CONF) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") store.create_stores(CONF) class IsolatedUnitTest(StoreClearingUnitTest): """ Unit test case that establishes a mock environment within a testing directory (in isolation) """ registry = None def setUp(self): super(IsolatedUnitTest, self).setUp() options.set_defaults(CONF, connection='sqlite://', sqlite_db='glance.sqlite') lockutils.set_defaults(os.path.join(self.test_dir)) self.config(verbose=False, debug=False) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") store.create_stores() stubs.stub_out_registry_and_store_server(self.stubs, self.test_dir, registry=self.registry) def set_policy_rules(self, rules): fap = open(CONF.oslo_policy.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() glance-12.0.0/glance/tests/unit/test_scrubber.py0000664000567000056710000001505712701407047022770 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import glance_store from mock import patch from mox3 import mox from oslo_config import cfg # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance import scrubber from glance.tests import utils as test_utils CONF = cfg.CONF class TestScrubber(test_utils.BaseTestCase): def setUp(self): super(TestScrubber, self).setUp() glance_store.register_opts(CONF) self.config(group='glance_store', default_store='file', filesystem_store_datadir=self.test_dir) glance_store.create_stores() self.mox = mox.Mox() def tearDown(self): self.mox.UnsetStubs() # These globals impact state outside of this test class, kill them. 
scrubber._file_queue = None scrubber._db_queue = None super(TestScrubber, self).tearDown() def _scrubber_cleanup_with_store_delete_exception(self, ex): uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' scrub = scrubber.Scrubber(glance_store) scrub.registry = self.mox.CreateMockAnything() scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) scrub.registry.update_image(id, {'status': 'deleted'}) self.mox.StubOutWithMock(glance_store, "delete_from_backend") glance_store.delete_from_backend( uri, mox.IgnoreArg()).AndRaise(ex) self.mox.ReplayAll() scrub._scrub_image(id, [(id, '-', uri)]) self.mox.VerifyAll() def test_store_delete_successful(self): uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' scrub = scrubber.Scrubber(glance_store) scrub.registry = self.mox.CreateMockAnything() scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) scrub.registry.update_image(id, {'status': 'deleted'}) self.mox.StubOutWithMock(glance_store, "delete_from_backend") glance_store.delete_from_backend(uri, mox.IgnoreArg()).AndReturn('') self.mox.ReplayAll() scrub._scrub_image(id, [(id, '-', uri)]) self.mox.VerifyAll() def test_store_delete_store_exceptions(self): # While scrubbing image data, all store exceptions, other than # NotFound, cause image scrubbing to fail. Essentially, no attempt # would be made to update the status of image. 
uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' ex = glance_store.GlanceStoreException() scrub = scrubber.Scrubber(glance_store) scrub.registry = self.mox.CreateMockAnything() self.mox.StubOutWithMock(glance_store, "delete_from_backend") glance_store.delete_from_backend( uri, mox.IgnoreArg()).AndRaise(ex) self.mox.ReplayAll() scrub._scrub_image(id, [(id, '-', uri)]) self.mox.VerifyAll() def test_store_delete_notfound_exception(self): # While scrubbing image data, NotFound exception is ignored and image # scrubbing succeeds uri = 'file://some/path/%s' % uuid.uuid4() id = 'helloworldid' ex = glance_store.NotFound(message='random') scrub = scrubber.Scrubber(glance_store) scrub.registry = self.mox.CreateMockAnything() scrub.registry.get_image(id).AndReturn({'status': 'pending_delete'}) scrub.registry.update_image(id, {'status': 'deleted'}) self.mox.StubOutWithMock(glance_store, "delete_from_backend") glance_store.delete_from_backend(uri, mox.IgnoreArg()).AndRaise(ex) self.mox.ReplayAll() scrub._scrub_image(id, [(id, '-', uri)]) self.mox.VerifyAll() class TestScrubDBQueue(test_utils.BaseTestCase): def setUp(self): super(TestScrubDBQueue, self).setUp() def tearDown(self): super(TestScrubDBQueue, self).tearDown() def _create_image_list(self, count): images = [] for x in range(count): images.append({'id': x}) return images def test_get_all_images(self): scrub_queue = scrubber.ScrubDBQueue() images = self._create_image_list(15) image_pager = ImagePager(images) def make_get_images_detailed(pager): def mock_get_images_detailed(filters, marker=None): return pager() return mock_get_images_detailed with patch.object(scrub_queue.registry, 'get_images_detailed') as ( _mock_get_images_detailed): _mock_get_images_detailed.side_effect = ( make_get_images_detailed(image_pager)) actual = list(scrub_queue._get_all_images()) self.assertEqual(images, actual) def test_get_all_images_paged(self): scrub_queue = scrubber.ScrubDBQueue() images = self._create_image_list(15) 
image_pager = ImagePager(images, page_size=4) def make_get_images_detailed(pager): def mock_get_images_detailed(filters, marker=None): return pager() return mock_get_images_detailed with patch.object(scrub_queue.registry, 'get_images_detailed') as ( _mock_get_images_detailed): _mock_get_images_detailed.side_effect = ( make_get_images_detailed(image_pager)) actual = list(scrub_queue._get_all_images()) self.assertEqual(images, actual) class ImagePager(object): def __init__(self, images, page_size=0): image_count = len(images) if page_size == 0 or page_size > image_count: page_size = image_count self.image_batches = [] start = 0 l = len(images) while start < l: self.image_batches.append(images[start: start + page_size]) start += page_size if (l - start) < page_size: page_size = l - start def __call__(self): if len(self.image_batches) == 0: return [] else: return self.image_batches.pop(0) glance-12.0.0/glance/tests/unit/test_context.py0000664000567000056710000001410012701407047022631 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance import context from glance.tests.unit import utils as unit_utils from glance.tests import utils def _fake_image(owner, is_public): return { 'id': None, 'owner': owner, 'is_public': is_public, } def _fake_membership(can_share=False): return {'can_share': can_share} class TestContext(utils.BaseTestCase): def setUp(self): super(TestContext, self).setUp() self.db_api = unit_utils.FakeDB() def do_visible(self, exp_res, img_owner, img_public, **kwargs): """ Perform a context visibility test. Creates a (fake) image with the specified owner and is_public attributes, then creates a context with the given keyword arguments and expects exp_res as the result of an is_image_visible() call on the context. """ img = _fake_image(img_owner, img_public) ctx = context.RequestContext(**kwargs) self.assertEqual(exp_res, self.db_api.is_image_visible(ctx, img)) def test_empty_public(self): """ Tests that an empty context (with is_admin set to True) can access an image with is_public set to True. """ self.do_visible(True, None, True, is_admin=True) def test_empty_public_owned(self): """ Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to True. """ self.do_visible(True, 'pattieblack', True, is_admin=True) def test_empty_private(self): """ Tests that an empty context (with is_admin set to True) can access an image with is_public set to False. """ self.do_visible(True, None, False, is_admin=True) def test_empty_private_owned(self): """ Tests that an empty context (with is_admin set to True) can access an owned image with is_public set to False. """ self.do_visible(True, 'pattieblack', False, is_admin=True) def test_anon_public(self): """ Tests that an anonymous context (with is_admin set to False) can access an image with is_public set to True. 
""" self.do_visible(True, None, True) def test_anon_public_owned(self): """ Tests that an anonymous context (with is_admin set to False) can access an owned image with is_public set to True. """ self.do_visible(True, 'pattieblack', True) def test_anon_private(self): """ Tests that an anonymous context (with is_admin set to False) can access an unowned image with is_public set to False. """ self.do_visible(True, None, False) def test_anon_private_owned(self): """ Tests that an anonymous context (with is_admin set to False) cannot access an owned image with is_public set to False. """ self.do_visible(False, 'pattieblack', False) def test_auth_public(self): """ Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to True. """ self.do_visible(True, None, True, tenant='froggy') def test_auth_public_unowned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does not own) with is_public set to True. """ self.do_visible(True, 'pattieblack', True, tenant='froggy') def test_auth_public_owned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to True. """ self.do_visible(True, 'pattieblack', True, tenant='pattieblack') def test_auth_private(self): """ Tests that an authenticated context (with is_admin set to False) can access an image with is_public set to False. """ self.do_visible(True, None, False, tenant='froggy') def test_auth_private_unowned(self): """ Tests that an authenticated context (with is_admin set to False) cannot access an image (which it does not own) with is_public set to False. """ self.do_visible(False, 'pattieblack', False, tenant='froggy') def test_auth_private_owned(self): """ Tests that an authenticated context (with is_admin set to False) can access an image (which it does own) with is_public set to False. 
""" self.do_visible(True, 'pattieblack', False, tenant='pattieblack') def test_request_id(self): contexts = [context.RequestContext().request_id for _ in range(5)] # Check for uniqueness -- set() will normalize its argument self.assertEqual(5, len(set(contexts))) def test_service_catalog(self): ctx = context.RequestContext(service_catalog=['foo']) self.assertEqual(['foo'], ctx.service_catalog) def test_user_identity(self): ctx = context.RequestContext(user="user", tenant="tenant", domain="domain", user_domain="user-domain", project_domain="project-domain") self.assertEqual('user tenant domain user-domain project-domain', ctx.to_dict()["user_identity"]) glance-12.0.0/glance/tests/unit/test_image_cache.py0000664000567000056710000004746612701407047023377 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from contextlib import contextmanager import datetime import hashlib import os import time import fixtures from oslo_utils import units from oslotest import moxstubout import six # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.common import exception from glance import image_cache # NOTE(bcwaldon): This is imported to load the registry config options import glance.registry # noqa from glance.tests import utils as test_utils from glance.tests.utils import skip_if_disabled from glance.tests.utils import xattr_writes_supported FIXTURE_LENGTH = 1024 FIXTURE_DATA = b'*' * FIXTURE_LENGTH class ImageCacheTestCase(object): def _setup_fixture_file(self): FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertFalse(self.cache.is_cached(1)) self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) self.assertTrue(self.cache.is_cached(1)) @skip_if_disabled def test_is_cached(self): """Verify is_cached(1) returns 0, then add something to the cache and verify is_cached(1) returns 1. """ self._setup_fixture_file() @skip_if_disabled def test_read(self): """Verify is_cached(1) returns 0, then add something to the cache and verify after a subsequent read from the cache that is_cached(1) returns 1. """ self._setup_fixture_file() buff = six.BytesIO() with self.cache.open_for_read(1) as cache_file: for chunk in cache_file: buff.write(chunk) self.assertEqual(FIXTURE_DATA, buff.getvalue()) @skip_if_disabled def test_open_for_read(self): """Test convenience wrapper for opening a cache file via its image identifier. """ self._setup_fixture_file() buff = six.BytesIO() with self.cache.open_for_read(1) as cache_file: for chunk in cache_file: buff.write(chunk) self.assertEqual(FIXTURE_DATA, buff.getvalue()) @skip_if_disabled def test_get_image_size(self): """Test convenience wrapper for querying cache file size via its image identifier. 
""" self._setup_fixture_file() size = self.cache.get_image_size(1) self.assertEqual(FIXTURE_LENGTH, size) @skip_if_disabled def test_delete(self): """Test delete method that removes an image from the cache.""" self._setup_fixture_file() self.cache.delete_cached_image(1) self.assertFalse(self.cache.is_cached(1)) @skip_if_disabled def test_delete_all(self): """Test delete method that removes an image from the cache.""" for image_id in (1, 2): self.assertFalse(self.cache.is_cached(image_id)) for image_id in (1, 2): FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(image_id, FIXTURE_FILE)) for image_id in (1, 2): self.assertTrue(self.cache.is_cached(image_id)) self.cache.delete_all_cached_images() for image_id in (1, 2): self.assertFalse(self.cache.is_cached(image_id)) @skip_if_disabled def test_clean_stalled(self): """Test the clean method removes expected images.""" incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', '1') incomplete_file = open(incomplete_file_path, 'wb') incomplete_file.write(FIXTURE_DATA) incomplete_file.close() self.assertTrue(os.path.exists(incomplete_file_path)) self.cache.clean(stall_time=0) self.assertFalse(os.path.exists(incomplete_file_path)) @skip_if_disabled def test_clean_stalled_nonzero_stall_time(self): """ Test the clean method removes the stalled images as expected """ incomplete_file_path_1 = os.path.join(self.cache_dir, 'incomplete', '1') incomplete_file_path_2 = os.path.join(self.cache_dir, 'incomplete', '2') for f in (incomplete_file_path_1, incomplete_file_path_2): incomplete_file = open(f, 'wb') incomplete_file.write(FIXTURE_DATA) incomplete_file.close() mtime = os.path.getmtime(incomplete_file_path_1) pastday = (datetime.datetime.fromtimestamp(mtime) - datetime.timedelta(days=1)) atime = int(time.mktime(pastday.timetuple())) mtime = atime os.utime(incomplete_file_path_1, (atime, mtime)) self.assertTrue(os.path.exists(incomplete_file_path_1)) 
self.assertTrue(os.path.exists(incomplete_file_path_2)) self.cache.clean(stall_time=3600) self.assertFalse(os.path.exists(incomplete_file_path_1)) self.assertTrue(os.path.exists(incomplete_file_path_2)) @skip_if_disabled def test_prune(self): """ Test that pruning the cache works as expected... """ self.assertEqual(0, self.cache.get_cache_size()) # Add a bunch of images to the cache. The max cache size for the cache # is set to 5KB and each image is 1K. We use 11 images in this test. # The first 10 are added to and retrieved from cache in the same order. # Then, the 11th image is added to cache but not retrieved before we # prune. We should see only 5 images left after pruning, and the # images that are least recently accessed should be the ones pruned... for x in range(10): FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(x, FIXTURE_FILE)) self.assertEqual(10 * units.Ki, self.cache.get_cache_size()) # OK, hit the images that are now cached... for x in range(10): buff = six.BytesIO() with self.cache.open_for_read(x) as cache_file: for chunk in cache_file: buff.write(chunk) # Add a new image to cache. # This is specifically to test the bug: 1438564 FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file(99, FIXTURE_FILE)) self.cache.prune() self.assertEqual(5 * units.Ki, self.cache.get_cache_size()) # Ensure images 0, 1, 2, 3, 4 & 5 are not cached anymore for x in range(0, 6): self.assertFalse(self.cache.is_cached(x), "Image %s was cached!" % x) # Ensure images 6, 7, 8 and 9 are still cached for x in range(6, 10): self.assertTrue(self.cache.is_cached(x), "Image %s was not cached!" 
% x) # Ensure the newly added image, 99, is still cached self.assertTrue(self.cache.is_cached(99), "Image 99 was not cached!") @skip_if_disabled def test_prune_to_zero(self): """Test that an image_cache_max_size of 0 doesn't kill the pruner This is a test specifically for LP #1039854 """ self.assertEqual(0, self.cache.get_cache_size()) FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.cache_image_file('xxx', FIXTURE_FILE)) self.assertEqual(1024, self.cache.get_cache_size()) # OK, hit the image that is now cached... buff = six.BytesIO() with self.cache.open_for_read('xxx') as cache_file: for chunk in cache_file: buff.write(chunk) self.config(image_cache_max_size=0) self.cache.prune() self.assertEqual(0, self.cache.get_cache_size()) self.assertFalse(self.cache.is_cached('xxx')) @skip_if_disabled def test_queue(self): """ Test that queueing works properly """ self.assertFalse(self.cache.is_cached(1)) self.assertFalse(self.cache.is_queued(1)) FIXTURE_FILE = six.BytesIO(FIXTURE_DATA) self.assertTrue(self.cache.queue_image(1)) self.assertTrue(self.cache.is_queued(1)) self.assertFalse(self.cache.is_cached(1)) # Should not return True if the image is already # queued for caching... 
self.assertFalse(self.cache.queue_image(1)) self.assertFalse(self.cache.is_cached(1)) # Test that we return False if we try to queue # an image that has already been cached self.assertTrue(self.cache.cache_image_file(1, FIXTURE_FILE)) self.assertFalse(self.cache.is_queued(1)) self.assertTrue(self.cache.is_cached(1)) self.assertFalse(self.cache.queue_image(1)) self.cache.delete_cached_image(1) for x in range(3): self.assertTrue(self.cache.queue_image(x)) self.assertEqual(['0', '1', '2'], self.cache.get_queued_images()) def test_open_for_write_good(self): """ Test to see if open_for_write works in normal case """ # test a good case image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) with self.cache.driver.open_for_write(image_id) as cache_file: cache_file.write(b'a') self.assertTrue(self.cache.is_cached(image_id), "Image %s was NOT cached!" % image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertFalse(os.path.exists(invalid_file_path)) def test_open_for_write_with_exception(self): """ Test to see if open_for_write works in a failure case for each driver This case is where an exception is raised while the file is being written. The image is partially filled in cache and filling wont resume so verify the image is moved to invalid/ directory """ # test a case where an exception is raised while the file is open image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) try: with self.cache.driver.open_for_write(image_id): raise IOError except Exception as e: self.assertIsInstance(e, IOError) self.assertFalse(self.cache.is_cached(image_id), "Image %s was cached!" 
% image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertTrue(os.path.exists(invalid_file_path)) def test_caching_iterator(self): """ Test to see if the caching iterator interacts properly with the driver When the iterator completes going through the data the driver should have closed the image and placed it correctly """ # test a case where an exception NOT raised while the file is open, # and a consuming iterator completes def consume(image_id): data = [b'a', b'b', b'c', b'd', b'e', b'f'] checksum = None caching_iter = self.cache.get_caching_iter(image_id, checksum, iter(data)) self.assertEqual(data, list(caching_iter)) image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) consume(image_id) self.assertTrue(self.cache.is_cached(image_id), "Image %s was NOT cached!" % image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertFalse(os.path.exists(invalid_file_path)) def test_caching_iterator_handles_backend_failure(self): """ Test that when the backend fails, caching_iter does not continue trying to consume data, and rolls back the cache. 
""" def faulty_backend(): data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] for d in data: if d == b'Fail': raise exception.GlanceException('Backend failure') yield d def consume(image_id): caching_iter = self.cache.get_caching_iter(image_id, None, faulty_backend()) # exercise the caching_iter list(caching_iter) image_id = '1' self.assertRaises(exception.GlanceException, consume, image_id) # make sure bad image was not cached self.assertFalse(self.cache.is_cached(image_id)) def test_caching_iterator_falloffend(self): """ Test to see if the caching iterator interacts properly with the driver in a case where the iterator is only partially consumed. In this case the image is only partially filled in cache and filling wont resume. When the iterator goes out of scope the driver should have closed the image and moved it from incomplete/ to invalid/ """ # test a case where a consuming iterator just stops. def falloffend(image_id): data = [b'a', b'b', b'c', b'd', b'e', b'f'] checksum = None caching_iter = self.cache.get_caching_iter(image_id, checksum, iter(data)) self.assertEqual(b'a', next(caching_iter)) image_id = '1' self.assertFalse(self.cache.is_cached(image_id)) falloffend(image_id) self.assertFalse(self.cache.is_cached(image_id), "Image %s was cached!" 
% image_id) # make sure it has tidied up incomplete_file_path = os.path.join(self.cache_dir, 'incomplete', image_id) invalid_file_path = os.path.join(self.cache_dir, 'invalid', image_id) self.assertFalse(os.path.exists(incomplete_file_path)) self.assertTrue(os.path.exists(invalid_file_path)) def test_gate_caching_iter_good_checksum(self): image = b"12345678990abcdefghijklmnop" image_id = 123 md5 = hashlib.md5() md5.update(image) checksum = md5.hexdigest() cache = image_cache.ImageCache() img_iter = cache.get_caching_iter(image_id, checksum, [image]) for chunk in img_iter: pass # checksum is valid, fake image should be cached: self.assertTrue(cache.is_cached(image_id)) def test_gate_caching_iter_bad_checksum(self): image = b"12345678990abcdefghijklmnop" image_id = 123 checksum = "foobar" # bad. cache = image_cache.ImageCache() img_iter = cache.get_caching_iter(image_id, checksum, [image]) def reader(): for chunk in img_iter: pass self.assertRaises(exception.GlanceException, reader) # checksum is invalid, caching will fail: self.assertFalse(cache.is_cached(image_id)) class TestImageCacheXattr(test_utils.BaseTestCase, ImageCacheTestCase): """Tests image caching when xattr is used in cache""" def setUp(self): """ Test to see if the pre-requisites for the image cache are working (python-xattr installed and xattr support on the filesystem) """ super(TestImageCacheXattr, self).setUp() if getattr(self, 'disable', False): return self.cache_dir = self.useFixture(fixtures.TempDir()).path if not getattr(self, 'inited', False): try: import xattr # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-xattr not installed.") return self.inited = True self.disabled = False self.config(image_cache_dir=self.cache_dir, image_cache_driver='xattr', image_cache_max_size=5 * units.Ki) self.cache = image_cache.ImageCache() if not xattr_writes_supported(self.cache_dir): self.inited = True self.disabled = True self.disabled_message = ("filesystem 
does not support xattr") return class TestImageCacheSqlite(test_utils.BaseTestCase, ImageCacheTestCase): """Tests image caching when SQLite is used in cache""" def setUp(self): """ Test to see if the pre-requisites for the image cache are working (python-sqlite3 installed) """ super(TestImageCacheSqlite, self).setUp() if getattr(self, 'disable', False): return if not getattr(self, 'inited', False): try: import sqlite3 # noqa except ImportError: self.inited = True self.disabled = True self.disabled_message = ("python-sqlite3 not installed.") return self.inited = True self.disabled = False self.cache_dir = self.useFixture(fixtures.TempDir()).path self.config(image_cache_dir=self.cache_dir, image_cache_driver='sqlite', image_cache_max_size=5 * units.Ki) self.cache = image_cache.ImageCache() class TestImageCacheNoDep(test_utils.BaseTestCase): def setUp(self): super(TestImageCacheNoDep, self).setUp() self.driver = None def init_driver(self2): self2.driver = self.driver mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.stubs = mox_fixture.stubs self.stubs.Set(image_cache.ImageCache, 'init_driver', init_driver) def test_get_caching_iter_when_write_fails(self): class FailingFile(object): def write(self, data): if data == "Fail": raise IOError class FailingFileDriver(object): def is_cacheable(self, *args, **kwargs): return True @contextmanager def open_for_write(self, *args, **kwargs): yield FailingFile() self.driver = FailingFileDriver() cache = image_cache.ImageCache() data = [b'a', b'b', b'c', b'Fail', b'd', b'e', b'f'] caching_iter = cache.get_caching_iter('dummy_id', None, iter(data)) self.assertEqual(data, list(caching_iter)) def test_get_caching_iter_when_open_fails(self): class OpenFailingDriver(object): def is_cacheable(self, *args, **kwargs): return True @contextmanager def open_for_write(self, *args, **kwargs): raise IOError self.driver = OpenFailingDriver() cache = image_cache.ImageCache() data = [b'a', b'b', b'c', b'd', b'e', b'f'] caching_iter = 
cache.get_caching_iter('dummy_id', None, iter(data)) self.assertEqual(data, list(caching_iter)) glance-12.0.0/glance/tests/unit/async/0000775000567000056710000000000012701407204020650 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/async/test_async.py0000664000567000056710000000323012701407047023401 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import glance.async import glance.tests.utils as test_utils class TestTaskExecutor(test_utils.BaseTestCase): def setUp(self): super(TestTaskExecutor, self).setUp() self.context = mock.Mock() self.task_repo = mock.Mock() self.image_repo = mock.Mock() self.image_factory = mock.Mock() self.executor = glance.async.TaskExecutor(self.context, self.task_repo, self.image_repo, self.image_factory) def test_begin_processing(self): # setup task_id = mock.ANY task_type = mock.ANY task = mock.Mock() with mock.patch.object( glance.async.TaskExecutor, '_run') as mock_run: self.task_repo.get.return_value = task self.executor.begin_processing(task_id) # assert the call mock_run.assert_called_once_with(task_id, task_type) glance-12.0.0/glance/tests/unit/async/__init__.py0000664000567000056710000000000012701407047022754 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/async/flows/0000775000567000056710000000000012701407204022002 5ustar 
jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/async/flows/test_introspect.py0000664000567000056710000001015012701407047025607 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock import glance_store from oslo_concurrency import processutils from oslo_config import cfg from glance.async.flows import introspect from glance import domain import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestImportTask(test_utils.BaseTestCase): def setUp(self): super(TestImportTask, self).setUp() self.task_factory = domain.TaskFactory() task_input = { "import_from": "http://cloud.foo/image.qcow2", "import_from_format": "qcow2", "image_properties": mock.sentinel.image_properties } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' self.task = self.task_factory.new_task(self.task_type, TENANT1, task_time_to_live=task_ttl, task_input=task_input) self.context = mock.Mock() self.img_repo = mock.Mock() self.task_repo = mock.Mock() self.img_factory = mock.Mock() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) def test_introspect_success(self): image_create = introspect._Introspect(self.task.task_id, self.task_type, self.img_repo) 
self.task_repo.get.return_value = self.task image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id) self.img_repo.get.return_value = image with mock.patch.object(processutils, 'execute') as exc_mock: result = json.dumps({ "virtual-size": 10737418240, "filename": "/tmp/image.qcow2", "cluster-size": 65536, "format": "qcow2", "actual-size": 373030912, "format-specific": { "type": "qcow2", "data": { "compat": "0.10" } }, "dirty-flag": False }) exc_mock.return_value = (result, None) image_create.execute(image, '/test/path.qcow2') self.assertEqual(10737418240, image.virtual_size) def test_introspect_no_image(self): image_create = introspect._Introspect(self.task.task_id, self.task_type, self.img_repo) self.task_repo.get.return_value = self.task image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id, virtual_size=None) self.img_repo.get.return_value = image # NOTE(flaper87): Don't mock, test the error. with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = (None, "some error") # NOTE(flaper87): Pls, read the `OptionalTask._catch_all` # docs to know why this is commented. # self.assertRaises(RuntimeError, # image_create.execute, # image, '/test/path.qcow2') image_create.execute(image, '/test/path.qcow2') self.assertIsNone(image.virtual_size) glance-12.0.0/glance/tests/unit/async/flows/__init__.py0000664000567000056710000000000012701407047024106 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/async/flows/test_ovf_process.py0000664000567000056710000001602612701407047025755 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import shutil import tarfile import tempfile import mock try: from xml.etree.cElementTree import ParseError except ImportError: from xml.etree.ElementTree import ParseError from glance.async.flows import ovf_process import glance.tests.utils as test_utils from oslo_config import cfg class TestOvfProcessTask(test_utils.BaseTestCase): def setUp(self): super(TestOvfProcessTask, self).setUp() # The glance/tests/var dir containing sample ova packages used # by the tests in this class self.test_ova_dir = os.path.abspath(os.path.join( os.path.dirname(__file__), '../../../', 'var')) self.tempdir = tempfile.mkdtemp() self.config(work_dir=self.tempdir, group="task") # These are the properties that we will extract from the ovf # file contained in a ova package interested_properties = ( '{\n' ' "cim_pasd": [\n' ' "InstructionSetExtensionName",\n' ' "ProcessorArchitecture"]\n' '}\n') self.config_file_name = os.path.join(self.tempdir, 'ovf-metadata.json') with open(self.config_file_name, 'w') as config_file: config_file.write(interested_properties) self.image = mock.Mock() self.image.container_format = 'ova' self.image.context.is_admin = True self.img_repo = mock.Mock() self.img_repo.get.return_value = self.image def tearDown(self): if os.path.exists(self.tempdir): shutil.rmtree(self.tempdir) super(TestOvfProcessTask, self).tearDown() def _copy_ova_to_tmpdir(self, ova_name): # Copies an ova pacakge to the tempdir for tempdir from where # the system-under-test will read it from shutil.copy(os.path.join(self.test_ova_dir, ova_name), self.tempdir) return 
os.path.join(self.tempdir, ova_name) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_success(self, mock_find_file): mock_find_file.return_value = self.config_file_name ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) # Note that the extracted disk image is overwritten onto the input ova # file with open(ova_file_path, 'rb') as disk_image_file: content = disk_image_file.read() # b'ABCD' is the exact contents of the disk image file # testserver-disk1.vmdk contained in the testserver.ova package used # by this test self.assertEqual(b'ABCD', content) # 'DMTF:x86:VT-d' is the value in the testerver.ovf file in the # testserver.ova package self.image.extra_properties.update.assert_called_once_with( {'cim_pasd_InstructionSetExtensionName': 'DMTF:x86:VT-d'}) self.assertEqual('bare', self.image.container_format) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_no_config_file(self, mock_find_file): # Mimics a Glance deployment without the ovf-metadata.json file mock_find_file.return_value = None ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertEqual(ova_uri, oprocess.execute('test_image_id', ova_uri)) # Note that the extracted disk image is overwritten onto the input # ova file. 
with open(ova_file_path, 'rb') as disk_image_file: content = disk_image_file.read() # b'ABCD' is the exact contents of the disk image file # testserver-disk1.vmdk contained in the testserver.ova package used # by this test self.assertEqual(b'ABCD', content) # No properties must be selected from the ovf file self.image.extra_properties.update.assert_called_once_with({}) self.assertEqual('bare', self.image.container_format) @mock.patch.object(cfg.ConfigOpts, 'find_file') def test_ovf_process_not_admin(self, mock_find_file): mock_find_file.return_value = self.config_file_name ova_file_path = self._copy_ova_to_tmpdir('testserver.ova') ova_uri = 'file://' + ova_file_path self.image.context.is_admin = False oprocess = ovf_process._OVF_Process('task_id', 'ovf_proc', self.img_repo) self.assertRaises(RuntimeError, oprocess.execute, 'test_image_id', ova_uri) def test_extract_ova_not_tar(self): # testserver-not-tar.ova package is not in tar format ova_file_path = os.path.join(self.test_ova_dir, 'testserver-not-tar.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(tarfile.ReadError, iextractor.extract, ova_file) def test_extract_ova_no_disk(self): # testserver-no-disk.ova package contains no disk image file ova_file_path = os.path.join(self.test_ova_dir, 'testserver-no-disk.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(KeyError, iextractor.extract, ova_file) def test_extract_ova_no_ovf(self): # testserver-no-ovf.ova package contains no ovf file ova_file_path = os.path.join(self.test_ova_dir, 'testserver-no-ovf.ova') iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(RuntimeError, iextractor.extract, ova_file) def test_extract_ova_bad_ovf(self): # testserver-bad-ovf.ova package has an ovf file that contains # invalid xml ova_file_path = os.path.join(self.test_ova_dir, 'testserver-bad-ovf.ova') 
iextractor = ovf_process.OVAImageExtractor() with open(ova_file_path, 'rb') as ova_file: self.assertRaises(ParseError, iextractor._parse_OVF, ova_file) glance-12.0.0/glance/tests/unit/async/flows/test_convert.py0000664000567000056710000001543712701407047025112 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock import os import glance_store from oslo_concurrency import processutils from oslo_config import cfg import six from glance.async.flows import convert from glance.async import taskflow_executor from glance.common.scripts import utils as script_utils from glance.common import utils from glance import domain from glance import gateway import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestImportTask(test_utils.BaseTestCase): def setUp(self): super(TestImportTask, self).setUp() self.work_dir = os.path.join(self.test_dir, 'work_dir') utils.safe_mkdirs(self.work_dir) self.config(work_dir=self.work_dir, group='task') self.context = mock.MagicMock() self.img_repo = mock.MagicMock() self.task_repo = mock.MagicMock() self.gateway = gateway.Gateway() self.task_factory = domain.TaskFactory() self.img_factory = self.gateway.get_image_factory(self.context) self.image = self.img_factory.new_image(image_id=UUID1, disk_format='raw', container_format='bare') task_input = { 
"import_from": "http://cloud.foo/image.raw", "import_from_format": "raw", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' self.task = self.task_factory.new_task(self.task_type, TENANT1, task_time_to_live=task_ttl, task_input=task_input) glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") self.config(conversion_format='qcow2', group='taskflow_executor') glance_store.create_stores(CONF) def test_convert_success(self): image_convert = convert._Convert(self.task.task_id, self.task_type, self.img_repo) self.task_repo.get.return_value = self.task image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id, virtual_size=None) self.img_repo.get.return_value = image with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ("", None) with mock.patch.object(os, 'rename') as rm_mock: rm_mock.return_value = None image_convert.execute(image, 'file:///test/path.raw') def test_convert_revert_success(self): image_convert = convert._Convert(self.task.task_id, self.task_type, self.img_repo) self.task_repo.get.return_value = self.task image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id, virtual_size=None) self.img_repo.get.return_value = image with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.return_value = ("", None) with mock.patch.object(os, 'remove') as rmtree_mock: rmtree_mock.return_value = None image_convert.revert(image, 'file:///tmp/test') def test_import_flow_with_convert_and_introspect(self): self.config(engine_mode='serial', group='taskflow_executor') image = self.img_factory.new_image(image_id=UUID1, disk_format='raw', container_format='bare') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) 
self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = image img_factory.new_image.side_effect = create_image image_path = os.path.join(self.work_dir, image.image_id) def fake_execute(*args, **kwargs): if 'info' in args: # NOTE(flaper87): Make sure the file actually # exists. Extra check to verify previous tasks did # what they were supposed to do. assert os.path.exists(args[3].split("file://")[-1]) return (json.dumps({ "virtual-size": 10737418240, "filename": "/tmp/image.qcow2", "cluster-size": 65536, "format": "qcow2", "actual-size": 373030912, "format-specific": { "type": "qcow2", "data": { "compat": "0.10" } }, "dirty-flag": False }), None) open("%s.converted" % image_path, 'a').close() return ("", None) with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = six.BytesIO(b"TEST_IMAGE") with mock.patch.object(processutils, 'execute') as exc_mock: exc_mock.side_effect = fake_execute executor.begin_processing(self.task.task_id) # NOTE(flaper87): DeleteFromFS should've deleted this # file. Make sure it doesn't exist. self.assertFalse(os.path.exists(image_path)) # NOTE(flaper87): Workdir should be empty after all # the tasks have been executed. self.assertEqual([], os.listdir(self.work_dir)) self.assertEqual('qcow2', image.disk_format) self.assertEqual(10737418240, image.virtual_size) glance-12.0.0/glance/tests/unit/async/flows/test_import.py0000664000567000056710000004144512701407047024742 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock import os import glance_store from oslo_concurrency import processutils as putils from oslo_config import cfg import six from six.moves import urllib from taskflow import task from taskflow.types import failure import glance.async.flows.base_import as import_flow from glance.async import taskflow_executor from glance.common.scripts.image_import import main as image_import from glance.common.scripts import utils as script_utils from glance.common import utils from glance import domain from glance import gateway import glance.tests.utils as test_utils CONF = cfg.CONF UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class _ErrorTask(task.Task): def execute(self): raise RuntimeError() class TestImportTask(test_utils.BaseTestCase): def setUp(self): super(TestImportTask, self).setUp() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) self.work_dir = os.path.join(self.test_dir, 'work_dir') utils.safe_mkdirs(self.work_dir) self.config(work_dir=self.work_dir, group='task') self.context = mock.MagicMock() self.img_repo = mock.MagicMock() self.task_repo = mock.MagicMock() self.gateway = gateway.Gateway() self.task_factory = domain.TaskFactory() self.img_factory = self.gateway.get_image_factory(self.context) self.image = self.img_factory.new_image(image_id=UUID1, disk_format='qcow2', container_format='bare') task_input = { "import_from": "http://cloud.foo/image.qcow2", 
"import_from_format": "qcow2", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' self.task = self.task_factory.new_task(self.task_type, TENANT1, task_time_to_live=task_ttl, task_input=task_input) def test_import_flow(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = six.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) executor.begin_processing(self.task.task_id) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) self.assertEqual(1, len(list(self.image.locations))) self.assertEqual("file://%s/%s" % (self.test_dir, self.image.image_id), self.image.locations[0]['url']) def test_import_flow_missing_work_dir(self): self.config(engine_mode='serial', group='taskflow_executor') self.config(work_dir=None, group='task') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 
'get_image_data_iter') as dmock: dmock.return_value = six.BytesIO(b"TEST_IMAGE") with mock.patch.object(import_flow._ImportToFS, 'execute') as emk: executor.begin_processing(self.task.task_id) self.assertFalse(emk.called) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) def test_import_flow_revert_import_to_fs(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.side_effect = RuntimeError with mock.patch.object(import_flow._ImportToFS, 'revert') as rmock: self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self.assertTrue(rmock.called) self.assertIsInstance(rmock.call_args[1]['result'], failure.Failure) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) # Note(sabari): The image should not have been uploaded to # the store as the flow failed before ImportToStore Task. 
self.assertFalse(os.path.exists(image_path)) def test_import_flow_backed_file_import_to_fs(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = six.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'backing-filename': '/etc/password' }), None) with mock.patch.object(import_flow._ImportToFS, 'revert') as rmock: self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) self.assertTrue(rmock.called) self.assertIsInstance(rmock.call_args[1]['result'], failure.Failure) image_path = os.path.join(self.test_dir, self.image.image_id) fname = "%s.tasks_import" % image_path tmp_image_path = os.path.join(self.work_dir, fname) self.assertFalse(os.path.exists(tmp_image_path)) # Note(sabari): The image should not have been uploaded to # the store as the flow failed before ImportToStore Task. 
self.assertFalse(os.path.exists(image_path)) def test_import_flow_revert(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: dmock.return_value = six.BytesIO(b"TEST_IMAGE") with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) with mock.patch.object(import_flow, "_get_import_flows") as imock: imock.return_value = (x for x in [_ErrorTask()]) self.assertRaises(RuntimeError, executor.begin_processing, self.task.task_id) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, ("%s.tasks_import" % image_path)) self.assertFalse(os.path.exists(tmp_image_path)) # NOTE(flaper87): Eventually, we want this to be assertTrue # The current issue is there's no way to tell taskflow to # continue on failures. That is, revert the subflow but # keep executing the parent flow. Under # discussion/development. 
self.assertFalse(os.path.exists(image_path)) def test_import_flow_no_import_flows(self): self.config(engine_mode='serial', group='taskflow_executor') img_factory = mock.MagicMock() executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.img_repo, img_factory) self.task_repo.get.return_value = self.task def create_image(*args, **kwargs): kwargs['image_id'] = UUID1 return self.img_factory.new_image(*args, **kwargs) self.img_repo.get.return_value = self.image img_factory.new_image.side_effect = create_image with mock.patch.object(urllib.request, 'urlopen') as umock: content = b"TEST_IMAGE" umock.return_value = six.BytesIO(content) with mock.patch.object(import_flow, "_get_import_flows") as imock: imock.return_value = (x for x in []) executor.begin_processing(self.task.task_id) image_path = os.path.join(self.test_dir, self.image.image_id) tmp_image_path = os.path.join(self.work_dir, "%s.tasks_import" % image_path) self.assertFalse(os.path.exists(tmp_image_path)) self.assertTrue(os.path.exists(image_path)) self.assertEqual(1, umock.call_count) with open(image_path, 'rb') as ifile: self.assertEqual(content, ifile.read()) def test_create_image(self): image_create = import_flow._CreateImage(self.task.task_id, self.task_type, self.task_repo, self.img_repo, self.img_factory) self.task_repo.get.return_value = self.task with mock.patch.object(image_import, 'create_image') as ci_mock: ci_mock.return_value = mock.Mock() image_create.execute() ci_mock.assert_called_once_with(self.img_repo, self.img_factory, {'container_format': 'bare', 'disk_format': 'qcow2'}, self.task.task_id) def test_save_image(self): save_image = import_flow._SaveImage(self.task.task_id, self.task_type, self.img_repo) with mock.patch.object(self.img_repo, 'get') as get_mock: image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id, status='saving') get_mock.return_value = image with mock.patch.object(self.img_repo, 'save') as save_mock: 
save_image.execute(image.image_id) get_mock.assert_called_once_with(image_id) save_mock.assert_called_once_with(image) self.assertEqual('active', image.status) def test_import_to_fs(self): import_fs = import_flow._ImportToFS(self.task.task_id, self.task_type, self.task_repo, 'http://example.com/image.qcow2') with mock.patch.object(script_utils, 'get_image_data_iter') as dmock: content = b"test" dmock.return_value = [content] with mock.patch.object(putils, 'trycmd') as tmock: tmock.return_value = (json.dumps({ 'format': 'qcow2', }), None) image_id = UUID1 path = import_fs.execute(image_id) reader, size = glance_store.get_from_backend(path) self.assertEqual(4, size) self.assertEqual(content, b"".join(reader)) image_path = os.path.join(self.work_dir, image_id) tmp_image_path = os.path.join(self.work_dir, image_path) self.assertTrue(os.path.exists(tmp_image_path)) def test_delete_from_fs(self): delete_fs = import_flow._DeleteFromFS(self.task.task_id, self.task_type) data = [b"test"] store = glance_store.get_store_from_scheme('file') path = glance_store.store_add_to_backend(mock.sentinel.image_id, data, mock.sentinel.image_size, store, context=None)[0] path_wo_scheme = path.split("file://")[1] self.assertTrue(os.path.exists(path_wo_scheme)) delete_fs.execute(path) self.assertFalse(os.path.exists(path_wo_scheme)) def test_complete_task(self): complete_task = import_flow._CompleteTask(self.task.task_id, self.task_type, self.task_repo) image_id = mock.sentinel.image_id image = mock.MagicMock(image_id=image_id) self.task_repo.get.return_value = self.task with mock.patch.object(self.task, 'succeed') as succeed: complete_task.execute(image.image_id) succeed.assert_called_once_with({'image_id': image_id}) glance-12.0.0/glance/tests/unit/async/test_taskflow_executor.py0000664000567000056710000000625612701407047026047 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import glance_store from oslo_config import cfg from taskflow import engines from glance.async import taskflow_executor from glance import domain import glance.tests.utils as test_utils CONF = cfg.CONF TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' class TestTaskExecutor(test_utils.BaseTestCase): def setUp(self): super(TestTaskExecutor, self).setUp() glance_store.register_opts(CONF) self.config(default_store='file', stores=['file', 'http'], filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores(CONF) self.config(engine_mode='serial', group='taskflow_executor') self.context = mock.Mock() self.task_repo = mock.Mock() self.image_repo = mock.Mock() self.image_factory = mock.Mock() task_input = { "import_from": "http://cloud.foo/image.qcow2", "import_from_format": "qcow2", "image_properties": {'disk_format': 'qcow2', 'container_format': 'bare'} } task_ttl = CONF.task.task_time_to_live self.task_type = 'import' self.task_factory = domain.TaskFactory() self.task = self.task_factory.new_task(self.task_type, TENANT1, task_time_to_live=task_ttl, task_input=task_input) self.executor = taskflow_executor.TaskExecutor( self.context, self.task_repo, self.image_repo, self.image_factory) def test_begin_processing(self): with mock.patch.object(engines, 'load') as load_mock: engine = mock.Mock() load_mock.return_value = engine self.task_repo.get.return_value = self.task 
self.executor.begin_processing(self.task.task_id) # assert the call self.assertEqual(1, load_mock.call_count) self.assertEqual(1, engine.run.call_count) def test_task_fail(self): with mock.patch.object(engines, 'load') as load_mock: engine = mock.Mock() load_mock.return_value = engine engine.run.side_effect = RuntimeError self.task_repo.get.return_value = self.task self.assertRaises(RuntimeError, self.executor.begin_processing, self.task.task_id) self.assertEqual('failure', self.task.status) self.task_repo.save.assert_called_with(self.task) glance-12.0.0/glance/tests/unit/test_db.py0000664000567000056710000007466012701407047021553 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import mock from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import encodeutils from glance.common import crypt from glance.common import exception import glance.context import glance.db from glance.db.sqlalchemy import api import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'glance.common.config') @mock.patch('oslo_utils.importutils.import_module') class TestDbUtilities(test_utils.BaseTestCase): def setUp(self): super(TestDbUtilities, self).setUp() self.config(data_api='silly pants') self.api = mock.Mock() def test_get_api_calls_configure_if_present(self, import_module): import_module.return_value = self.api self.assertEqual(glance.db.get_api(), self.api) import_module.assert_called_once_with('silly pants') self.api.configure.assert_called_once_with() def test_get_api_skips_configure_if_missing(self, import_module): import_module.return_value = self.api del self.api.configure self.assertEqual(glance.db.get_api(), self.api) import_module.assert_called_once_with('silly pants') self.assertFalse(hasattr(self.api, 'configure')) UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' UUID1_LOCATION = 'file:///path/to/image' UUID1_LOCATION_METADATA = {'key': 'value'} UUID3_LOCATION = 'http://somehost.com/place' CHECKSUM = '93264c3edf5972c9f1cb309543d38a5c' CHCKSUM1 = '43264c3edf4972c9f1cb309543d38a55' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'is_public': False, 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': 
[], 'size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, } obj.update(kwargs) return obj def _db_task_fixture(task_id, type, status, **kwargs): obj = { 'id': task_id, 'type': type, 'status': status, 'input': None, 'result': None, 'owner': None, 'message': None, 'deleted': False, } obj.update(kwargs) return obj class TestImageRepo(test_utils.BaseTestCase): def setUp(self): super(TestImageRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( user=USER1, tenant=TENANT1) self.image_repo = glance.db.ImageRepo(self.context, self.db) self.image_factory = glance.domain.ImageFactory() self._create_images() self._create_image_members() def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHECKSUM, name='1', size=256, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]), _db_fixture(UUID2, owner=TENANT1, checksum=CHCKSUM1, name='2', size=512, is_public=False), _db_fixture(UUID3, owner=TENANT3, checksum=CHCKSUM1, name='3', size=1024, is_public=True, locations=[{'url': UUID3_LOCATION, 'metadata': {}, 'status': 'active'}]), _db_fixture(UUID4, owner=TENANT4, name='4', size=2048), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID2, TENANT2), _db_image_member_fixture(UUID2, TENANT3, status='accepted'), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_get(self): image = self.image_repo.get(UUID1) self.assertEqual(UUID1, image.image_id) self.assertEqual('1', image.name) 
self.assertEqual(set(['ping', 'pong']), image.tags) self.assertEqual('public', image.visibility) self.assertEqual('active', image.status) self.assertEqual(256, image.size) self.assertEqual(TENANT1, image.owner) def test_location_value(self): image = self.image_repo.get(UUID3) self.assertEqual(UUID3_LOCATION, image.locations[0]['url']) def test_location_data_value(self): image = self.image_repo.get(UUID1) self.assertEqual(UUID1_LOCATION, image.locations[0]['url']) self.assertEqual(UUID1_LOCATION_METADATA, image.locations[0]['metadata']) def test_location_data_exists(self): image = self.image_repo.get(UUID2) self.assertEqual([], image.locations) def test_get_not_found(self): fake_uuid = str(uuid.uuid4()) exc = self.assertRaises(exception.ImageNotFound, self.image_repo.get, fake_uuid) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) def test_get_forbidden(self): self.assertRaises(exception.NotFound, self.image_repo.get, UUID4) def test_list(self): images = self.image_repo.list() image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID1, UUID2, UUID3]), image_ids) def _do_test_list_status(self, status, expected): self.context = glance.context.RequestContext( user=USER1, tenant=TENANT3) self.image_repo = glance.db.ImageRepo(self.context, self.db) images = self.image_repo.list(member_status=status) self.assertEqual(expected, len(images)) def test_list_status(self): self._do_test_list_status(None, 3) def test_list_status_pending(self): self._do_test_list_status('pending', 2) def test_list_status_rejected(self): self._do_test_list_status('rejected', 2) def test_list_status_all(self): self._do_test_list_status('all', 3) def test_list_with_marker(self): full_images = self.image_repo.list() full_ids = [i.image_id for i in full_images] marked_images = self.image_repo.list(marker=full_ids[0]) actual_ids = [i.image_id for i in marked_images] self.assertEqual(full_ids[1:], actual_ids) def test_list_with_last_marker(self): images = 
self.image_repo.list() marked_images = self.image_repo.list(marker=images[-1].image_id) self.assertEqual(0, len(marked_images)) def test_limited_list(self): limited_images = self.image_repo.list(limit=2) self.assertEqual(2, len(limited_images)) def test_list_with_marker_and_limit(self): full_images = self.image_repo.list() full_ids = [i.image_id for i in full_images] marked_images = self.image_repo.list(marker=full_ids[0], limit=1) actual_ids = [i.image_id for i in marked_images] self.assertEqual(full_ids[1:2], actual_ids) def test_list_private_images(self): filters = {'visibility': 'private'} images = self.image_repo.list(filters=filters) image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID2]), image_ids) def test_list_with_checksum_filter_single_image(self): filters = {'checksum': CHECKSUM} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def test_list_with_checksum_filter_multiple_images(self): filters = {'checksum': CHCKSUM1} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(2, len(image_ids)) self.assertIn(UUID2, image_ids) self.assertIn(UUID3, image_ids) def test_list_with_wrong_checksum(self): WRONG_CHKSUM = 'd2fd42f979e1ed1aafadc7eb9354bff839c858cd' filters = {'checksum': WRONG_CHKSUM} images = self.image_repo.list(filters=filters) self.assertEqual(0, len(images)) def test_list_with_tags_filter_single_tag(self): filters = {'tags': ['ping']} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def test_list_with_tags_filter_multiple_tags(self): filters = {'tags': ['ping', 'pong']} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(1, len(image_ids)) self.assertEqual([UUID1], image_ids) def 
test_list_with_tags_filter_multiple_tags_and_nonexistent(self): filters = {'tags': ['ping', 'fake']} images = self.image_repo.list(filters=filters) image_ids = list([i.image_id for i in images]) self.assertEqual(0, len(image_ids)) def test_list_with_wrong_tags(self): filters = {'tags': ['fake']} images = self.image_repo.list(filters=filters) self.assertEqual(0, len(images)) def test_list_public_images(self): filters = {'visibility': 'public'} images = self.image_repo.list(filters=filters) image_ids = set([i.image_id for i in images]) self.assertEqual(set([UUID1, UUID3]), image_ids) def test_sorted_list(self): images = self.image_repo.list(sort_key=['size'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, UUID2, UUID3], image_ids) def test_sorted_list_with_multiple_keys(self): temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, name='1', size=1024, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]) self.db.image_create(None, image) images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, temp_id, UUID2, UUID3], image_ids) images = self.image_repo.list(sort_key=['size', 'name'], sort_dir=['asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID1, UUID2, temp_id, UUID3], image_ids) def test_sorted_list_with_multiple_dirs(self): temp_id = 'd80a1a6c-bd1f-41c5-90ee-81afedb1d58d' image = _db_fixture(temp_id, owner=TENANT1, checksum=CHECKSUM, name='1', size=1024, is_public=True, status='active', locations=[{'url': UUID1_LOCATION, 'metadata': UUID1_LOCATION_METADATA, 'status': 'active'}]) self.db.image_create(None, image) images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['asc', 'desc']) image_ids = [i.image_id for i in images] self.assertEqual([temp_id, UUID1, UUID2, UUID3], image_ids) 
images = self.image_repo.list(sort_key=['name', 'size'], sort_dir=['desc', 'asc']) image_ids = [i.image_id for i in images] self.assertEqual([UUID3, UUID2, UUID1, temp_id], image_ids) def test_add_image(self): image = self.image_factory.new_image(name='added image') self.assertEqual(image.updated_at, image.created_at) self.image_repo.add(image) retreived_image = self.image_repo.get(image.image_id) self.assertEqual('added image', retreived_image.name) self.assertEqual(image.updated_at, retreived_image.updated_at) def test_save_image(self): image = self.image_repo.get(UUID1) original_update_time = image.updated_at image.name = 'foo' image.tags = ['king', 'kong'] self.image_repo.save(image) current_update_time = image.updated_at self.assertTrue(current_update_time > original_update_time) image = self.image_repo.get(UUID1) self.assertEqual('foo', image.name) self.assertEqual(set(['king', 'kong']), image.tags) self.assertEqual(current_update_time, image.updated_at) def test_save_image_not_found(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID1) image.image_id = fake_uuid exc = self.assertRaises(exception.ImageNotFound, self.image_repo.save, image) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) def test_remove_image(self): image = self.image_repo.get(UUID1) previous_update_time = image.updated_at self.image_repo.remove(image) self.assertTrue(image.updated_at > previous_update_time) self.assertRaises(exception.ImageNotFound, self.image_repo.get, UUID1) def test_remove_image_not_found(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID1) image.image_id = fake_uuid exc = self.assertRaises( exception.ImageNotFound, self.image_repo.remove, image) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) class TestEncryptedLocations(test_utils.BaseTestCase): def setUp(self): super(TestEncryptedLocations, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( 
user=USER1, tenant=TENANT1) self.image_repo = glance.db.ImageRepo(self.context, self.db) self.image_factory = glance.domain.ImageFactory() self.crypt_key = '0123456789abcdef' self.config(metadata_encryption_key=self.crypt_key) self.foo_bar_location = [{'url': 'foo', 'metadata': {}, 'status': 'active'}, {'url': 'bar', 'metadata': {}, 'status': 'active'}] def test_encrypt_locations_on_add(self): image = self.image_factory.new_image(UUID1) image.locations = self.foo_bar_location self.image_repo.add(image) db_data = self.db.image_get(self.context, UUID1) self.assertNotEqual(db_data['locations'], ['foo', 'bar']) decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url']) for l in db_data['locations']] self.assertEqual([l['url'] for l in self.foo_bar_location], decrypted_locations) def test_encrypt_locations_on_save(self): image = self.image_factory.new_image(UUID1) self.image_repo.add(image) image.locations = self.foo_bar_location self.image_repo.save(image) db_data = self.db.image_get(self.context, UUID1) self.assertNotEqual(db_data['locations'], ['foo', 'bar']) decrypted_locations = [crypt.urlsafe_decrypt(self.crypt_key, l['url']) for l in db_data['locations']] self.assertEqual([l['url'] for l in self.foo_bar_location], decrypted_locations) def test_decrypt_locations_on_get(self): url_loc = ['ping', 'pong'] orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'} for l in url_loc] encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l) for l in url_loc] encrypted_locations = [{'url': l, 'metadata': {}, 'status': 'active'} for l in encrypted_locs] self.assertNotEqual(encrypted_locations, orig_locations) db_data = _db_fixture(UUID1, owner=TENANT1, locations=encrypted_locations) self.db.image_create(None, db_data) image = self.image_repo.get(UUID1) self.assertIn('id', image.locations[0]) self.assertIn('id', image.locations[1]) image.locations[0].pop('id') image.locations[1].pop('id') self.assertEqual(orig_locations, image.locations) def 
test_decrypt_locations_on_list(self): url_loc = ['ping', 'pong'] orig_locations = [{'url': l, 'metadata': {}, 'status': 'active'} for l in url_loc] encrypted_locs = [crypt.urlsafe_encrypt(self.crypt_key, l) for l in url_loc] encrypted_locations = [{'url': l, 'metadata': {}, 'status': 'active'} for l in encrypted_locs] self.assertNotEqual(encrypted_locations, orig_locations) db_data = _db_fixture(UUID1, owner=TENANT1, locations=encrypted_locations) self.db.image_create(None, db_data) image = self.image_repo.list()[0] self.assertIn('id', image.locations[0]) self.assertIn('id', image.locations[1]) image.locations[0].pop('id') image.locations[1].pop('id') self.assertEqual(orig_locations, image.locations) class TestImageMemberRepo(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext( user=USER1, tenant=TENANT1) self.image_repo = glance.db.ImageRepo(self.context, self.db) self.image_member_factory = glance.domain.ImageMemberFactory() self._create_images() self._create_image_members() image = self.image_repo.get(UUID1) self.image_member_repo = glance.db.ImageMemberRepo(self.context, self.db, image) def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, name='1', size=256, status='active'), _db_fixture(UUID2, owner=TENANT1, name='2', size=512, is_public=False), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID1, TENANT2), _db_image_member_fixture(UUID1, TENANT3), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_list(self): image_members = self.image_member_repo.list() image_member_ids = set([i.member_id for i in image_members]) self.assertEqual(set([TENANT2, TENANT3]), image_member_ids) def test_list_no_members(self): image 
= self.image_repo.get(UUID2) self.image_member_repo_uuid2 = glance.db.ImageMemberRepo( self.context, self.db, image) image_members = self.image_member_repo_uuid2.list() image_member_ids = set([i.member_id for i in image_members]) self.assertEqual(set([]), image_member_ids) def test_save_image_member(self): image_member = self.image_member_repo.get(TENANT2) image_member.status = 'accepted' self.image_member_repo.save(image_member) image_member_updated = self.image_member_repo.get(TENANT2) self.assertEqual(image_member.id, image_member_updated.id) self.assertEqual('accepted', image_member_updated.status) def test_add_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) retreived_image_member = self.image_member_repo.get(TENANT4) self.assertIsNotNone(retreived_image_member.id) self.assertEqual(image_member.image_id, retreived_image_member.image_id) self.assertEqual(image_member.member_id, retreived_image_member.member_id) self.assertEqual('pending', retreived_image_member.status) def test_add_duplicate_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) retreived_image_member = self.image_member_repo.get(TENANT4) self.assertIsNotNone(retreived_image_member.id) self.assertEqual(image_member.image_id, retreived_image_member.image_id) self.assertEqual(image_member.member_id, retreived_image_member.member_id) self.assertEqual('pending', retreived_image_member.status) self.assertRaises(exception.Duplicate, self.image_member_repo.add, image_member) def test_get_image_member(self): image = self.image_repo.get(UUID1) image_member = self.image_member_factory.new_image_member(image, TENANT4) self.assertIsNone(image_member.id) self.image_member_repo.add(image_member) member = 
self.image_member_repo.get(image_member.member_id) self.assertEqual(member.id, image_member.id) self.assertEqual(member.image_id, image_member.image_id) self.assertEqual(member.member_id, image_member.member_id) self.assertEqual('pending', member.status) def test_get_nonexistent_image_member(self): fake_image_member_id = 'fake' self.assertRaises(exception.NotFound, self.image_member_repo.get, fake_image_member_id) def test_remove_image_member(self): image_member = self.image_member_repo.get(TENANT2) self.image_member_repo.remove(image_member) self.assertRaises(exception.NotFound, self.image_member_repo.get, TENANT2) def test_remove_image_member_does_not_exist(self): fake_uuid = str(uuid.uuid4()) image = self.image_repo.get(UUID2) fake_member = glance.domain.ImageMemberFactory().new_image_member( image, TENANT4) fake_member.id = fake_uuid exc = self.assertRaises(exception.NotFound, self.image_member_repo.remove, fake_member) self.assertIn(fake_uuid, encodeutils.exception_to_unicode(exc)) class TestTaskRepo(test_utils.BaseTestCase): def setUp(self): super(TestTaskRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext(user=USER1, tenant=TENANT1) self.task_repo = glance.db.TaskRepo(self.context, self.db) self.task_factory = glance.domain.TaskFactory() self.fake_task_input = ('{"import_from": ' '"swift://cloud.foo/account/mycontainer/path"' ',"import_from_format": "qcow2"}') self._create_tasks() def _create_tasks(self): self.tasks = [ _db_task_fixture(UUID1, type='import', status='pending', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID2, type='import', status='processing', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID3, type='import', status='failure', input=self.fake_task_input, result='', owner=TENANT1, message='', ), _db_task_fixture(UUID4, type='import', status='success', input=self.fake_task_input, result='', 
owner=TENANT2, message='', ), ] [self.db.task_create(None, task) for task in self.tasks] def test_get(self): task = self.task_repo.get(UUID1) self.assertEqual(task.task_id, UUID1) self.assertEqual('import', task.type) self.assertEqual('pending', task.status) self.assertEqual(task.task_input, self.fake_task_input) self.assertEqual('', task.result) self.assertEqual('', task.message) self.assertEqual(task.owner, TENANT1) def test_get_not_found(self): self.assertRaises(exception.NotFound, self.task_repo.get, str(uuid.uuid4())) def test_get_forbidden(self): self.assertRaises(exception.NotFound, self.task_repo.get, UUID4) def test_list(self): tasks = self.task_repo.list() task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) def test_list_with_type(self): filters = {'type': 'import'} tasks = self.task_repo.list(filters=filters) task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID1, UUID2, UUID3]), task_ids) def test_list_with_status(self): filters = {'status': 'failure'} tasks = self.task_repo.list(filters=filters) task_ids = set([i.task_id for i in tasks]) self.assertEqual(set([UUID3]), task_ids) def test_list_with_marker(self): full_tasks = self.task_repo.list() full_ids = [i.task_id for i in full_tasks] marked_tasks = self.task_repo.list(marker=full_ids[0]) actual_ids = [i.task_id for i in marked_tasks] self.assertEqual(full_ids[1:], actual_ids) def test_list_with_last_marker(self): tasks = self.task_repo.list() marked_tasks = self.task_repo.list(marker=tasks[-1].task_id) self.assertEqual(0, len(marked_tasks)) def test_limited_list(self): limited_tasks = self.task_repo.list(limit=2) self.assertEqual(2, len(limited_tasks)) def test_list_with_marker_and_limit(self): full_tasks = self.task_repo.list() full_ids = [i.task_id for i in full_tasks] marked_tasks = self.task_repo.list(marker=full_ids[0], limit=1) actual_ids = [i.task_id for i in marked_tasks] self.assertEqual(full_ids[1:2], actual_ids) def 
test_sorted_list(self): tasks = self.task_repo.list(sort_key='status', sort_dir='desc') task_ids = [i.task_id for i in tasks] self.assertEqual([UUID2, UUID1, UUID3], task_ids) def test_add_task(self): task_type = 'import' task = self.task_factory.new_task(task_type, None, task_input=self.fake_task_input) self.assertEqual(task.updated_at, task.created_at) self.task_repo.add(task) retrieved_task = self.task_repo.get(task.task_id) self.assertEqual(task.updated_at, retrieved_task.updated_at) self.assertEqual(self.fake_task_input, retrieved_task.task_input) def test_save_task(self): task = self.task_repo.get(UUID1) original_update_time = task.updated_at self.task_repo.save(task) current_update_time = task.updated_at self.assertTrue(current_update_time > original_update_time) task = self.task_repo.get(UUID1) self.assertEqual(current_update_time, task.updated_at) def test_remove_task(self): task = self.task_repo.get(UUID1) self.task_repo.remove(task) self.assertRaises(exception.NotFound, self.task_repo.get, task.task_id) class RetryOnDeadlockTestCase(test_utils.BaseTestCase): def test_raise_deadlock(self): class TestException(Exception): pass self.attempts = 3 def _mock_get_session(): def _raise_exceptions(): self.attempts -= 1 if self.attempts <= 0: raise TestException("Exit") raise db_exc.DBDeadlock("Fake Exception") return _raise_exceptions with mock.patch.object(api, 'get_session') as sess: sess.side_effect = _mock_get_session() try: api._image_update(None, {}, 'fake-id') except TestException: self.assertEqual(3, sess.call_count) # Test retry on image destroy if db deadlock occurs self.attempts = 3 with mock.patch.object(api, 'get_session') as sess: sess.side_effect = _mock_get_session() try: api.image_destroy(None, 'fake-id') except TestException: self.assertEqual(3, sess.call_count) glance-12.0.0/glance/tests/unit/test_glance_manage.py0000664000567000056710000000322012701407047023707 0ustar jenkinsjenkins00000000000000# Copyright 2016 OpenStack Foundation. 
# Copyright 2016 NTT Data. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from glance.cmd import manage from glance import context from glance.db.sqlalchemy import api as db_api import glance.tests.utils as test_utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' class DBCommandsTestCase(test_utils.BaseTestCase): def setUp(self): super(DBCommandsTestCase, self).setUp() self.commands = manage.DbCommands() self.context = context.RequestContext( user=USER1, tenant=TENANT1) @mock.patch.object(db_api, 'purge_deleted_rows') @mock.patch.object(context, 'get_admin_context') def test_purge_command(self, mock_context, mock_db_purge): mock_context.return_value = self.context self.commands.purge(1, 100) mock_db_purge.assert_called_once_with(self.context, 1, 100) def test_purge_command_negative_rows(self): exit = self.assertRaises(SystemExit, self.commands.purge, 1, -1) self.assertEqual("Minimal rows limit is 1.", exit.code) glance-12.0.0/glance/tests/unit/test_cached_images.py0000664000567000056710000001115712701407047023712 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Yahoo! Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools import webob from glance.api import cached_images from glance.api import policy from glance.common import exception from glance import image_cache class FakePolicyEnforcer(policy.Enforcer): def __init__(self): self.default_rule = '' self.policy_path = '' self.policy_file_mtime = None self.policy_file_contents = None def enforce(self, context, action, target): return 'pass' def check(rule, target, creds, exc=None, *args, **kwargs): return 'pass' def _check(self, context, rule, target, *args, **kwargs): return 'pass' class FakeCache(image_cache.ImageCache): def __init__(self): self.init_driver() self.deleted_images = [] def init_driver(self): pass def get_cached_images(self): return {'id': 'test'} def delete_cached_image(self, image_id): self.deleted_images.append(image_id) def delete_all_cached_images(self): self.delete_cached_image(self.get_cached_images().get('id')) return 1 def get_queued_images(self): return {'test': 'passed'} def queue_image(self, image_id): return 'pass' def delete_queued_image(self, image_id): self.deleted_images.append(image_id) def delete_all_queued_images(self): self.delete_queued_image('deleted_img') return 1 class FakeController(cached_images.Controller): def __init__(self): self.cache = FakeCache() self.policy = FakePolicyEnforcer() class TestController(testtools.TestCase): def test_initialization_without_conf(self): self.assertRaises(exception.BadDriverConfiguration, cached_images.Controller) class TestCachedImages(testtools.TestCase): def setUp(self): super(TestCachedImages, self).setUp() test_controller = 
FakeController() self.controller = test_controller def test_get_cached_images(self): req = webob.Request.blank('') req.context = 'test' result = self.controller.get_cached_images(req) self.assertEqual({'cached_images': {'id': 'test'}}, result) def test_delete_cached_image(self): req = webob.Request.blank('') req.context = 'test' self.controller.delete_cached_image(req, image_id='test') self.assertEqual(['test'], self.controller.cache.deleted_images) def test_delete_cached_images(self): req = webob.Request.blank('') req.context = 'test' self.assertEqual({'num_deleted': 1}, self.controller.delete_cached_images(req)) self.assertEqual(['test'], self.controller.cache.deleted_images) def test_policy_enforce_forbidden(self): def fake_enforce(context, action, target): raise exception.Forbidden() self.controller.policy.enforce = fake_enforce req = webob.Request.blank('') req.context = 'test' self.assertRaises(webob.exc.HTTPForbidden, self.controller.get_cached_images, req) def test_get_queued_images(self): req = webob.Request.blank('') req.context = 'test' result = self.controller.get_queued_images(req) self.assertEqual({'queued_images': {'test': 'passed'}}, result) def test_queue_image(self): req = webob.Request.blank('') req.context = 'test' self.controller.queue_image(req, image_id='test1') def test_delete_queued_image(self): req = webob.Request.blank('') req.context = 'test' self.controller.delete_queued_image(req, 'deleted_img') self.assertEqual(['deleted_img'], self.controller.cache.deleted_images) def test_delete_queued_images(self): req = webob.Request.blank('') req.context = 'test' self.assertEqual({'num_deleted': 1}, self.controller.delete_queued_images(req)) self.assertEqual(['deleted_img'], self.controller.cache.deleted_images) glance-12.0.0/glance/tests/unit/common/0000775000567000056710000000000012701407204021023 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/test_timeutils.py0000664000567000056710000002106112701407047024460 0ustar 
jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import calendar import datetime import iso8601 import mock from glance.common import timeutils from glance.tests import utils as test_utils class TimeUtilsTest(test_utils.BaseTestCase): def setUp(self): super(TimeUtilsTest, self).setUp() self.skynet_self_aware_time_str = '1997-08-29T06:14:00Z' self.skynet_self_aware_time_ms_str = '1997-08-29T06:14:00.000123Z' self.skynet_self_aware_time = datetime.datetime(1997, 8, 29, 6, 14, 0) self.skynet_self_aware_ms_time = datetime.datetime( 1997, 8, 29, 6, 14, 0, 123) self.one_minute_before = datetime.datetime(1997, 8, 29, 6, 13, 0) self.one_minute_after = datetime.datetime(1997, 8, 29, 6, 15, 0) self.skynet_self_aware_time_perfect_str = '1997-08-29T06:14:00.000000' self.skynet_self_aware_time_perfect = datetime.datetime(1997, 8, 29, 6, 14, 0) def test_isotime(self): with mock.patch('datetime.datetime') as datetime_mock: datetime_mock.utcnow.return_value = self.skynet_self_aware_time dt = timeutils.isotime() self.assertEqual(dt, self.skynet_self_aware_time_str) def test_isotimei_micro_second_precision(self): with mock.patch('datetime.datetime') as datetime_mock: datetime_mock.utcnow.return_value = self.skynet_self_aware_ms_time dt = timeutils.isotime(subsecond=True) self.assertEqual(dt, self.skynet_self_aware_time_ms_str) def test_parse_isotime(self): expect = 
timeutils.parse_isotime(self.skynet_self_aware_time_str) skynet_self_aware_time_utc = self.skynet_self_aware_time.replace( tzinfo=iso8601.iso8601.UTC) self.assertEqual(skynet_self_aware_time_utc, expect) def test_parse_isotime_micro_second_precision(self): expect = timeutils.parse_isotime(self.skynet_self_aware_time_ms_str) skynet_self_aware_time_ms_utc = self.skynet_self_aware_ms_time.replace( tzinfo=iso8601.iso8601.UTC) self.assertEqual(skynet_self_aware_time_ms_utc, expect) def test_utcnow(self): with mock.patch('datetime.datetime') as datetime_mock: datetime_mock.utcnow.return_value = self.skynet_self_aware_time self.assertEqual(timeutils.utcnow(), self.skynet_self_aware_time) self.assertFalse(timeutils.utcnow() == self.skynet_self_aware_time) self.assertTrue(timeutils.utcnow()) def test_delta_seconds(self): before = timeutils.utcnow() after = before + datetime.timedelta(days=7, seconds=59, microseconds=123456) self.assertAlmostEquals(604859.123456, timeutils.delta_seconds(before, after)) def test_iso8601_from_timestamp(self): utcnow = timeutils.utcnow() iso = timeutils.isotime(utcnow) ts = calendar.timegm(utcnow.timetuple()) self.assertEqual(iso, timeutils.iso8601_from_timestamp(ts)) class TestIso8601Time(test_utils.BaseTestCase): def _instaneous(self, timestamp, yr, mon, day, hr, minute, sec, micro): self.assertEqual(timestamp.year, yr) self.assertEqual(timestamp.month, mon) self.assertEqual(timestamp.day, day) self.assertEqual(timestamp.hour, hr) self.assertEqual(timestamp.minute, minute) self.assertEqual(timestamp.second, sec) self.assertEqual(timestamp.microsecond, micro) def _do_test(self, time_str, yr, mon, day, hr, minute, sec, micro, shift): DAY_SECONDS = 24 * 60 * 60 timestamp = timeutils.parse_isotime(time_str) self._instaneous(timestamp, yr, mon, day, hr, minute, sec, micro) offset = timestamp.tzinfo.utcoffset(None) self.assertEqual(offset.seconds + offset.days * DAY_SECONDS, shift) def test_zulu(self): time_str = '2012-02-14T20:53:07Z' 
self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, 0) def test_zulu_micros(self): time_str = '2012-02-14T20:53:07.123Z' self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 123000, 0) def test_offset_east(self): time_str = '2012-02-14T20:53:07+04:30' offset = 4.5 * 60 * 60 self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, offset) def test_offset_east_micros(self): time_str = '2012-02-14T20:53:07.42+04:30' offset = 4.5 * 60 * 60 self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 420000, offset) def test_offset_west(self): time_str = '2012-02-14T20:53:07-05:30' offset = -5.5 * 60 * 60 self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 0, offset) def test_offset_west_micros(self): time_str = '2012-02-14T20:53:07.654321-05:30' offset = -5.5 * 60 * 60 self._do_test(time_str, 2012, 2, 14, 20, 53, 7, 654321, offset) def test_compare(self): zulu = timeutils.parse_isotime('2012-02-14T20:53:07') east = timeutils.parse_isotime('2012-02-14T20:53:07-01:00') west = timeutils.parse_isotime('2012-02-14T20:53:07+01:00') self.assertTrue(east > west) self.assertTrue(east > zulu) self.assertTrue(zulu > west) def test_compare_micros(self): zulu = timeutils.parse_isotime('2012-02-14T20:53:07.6544') east = timeutils.parse_isotime('2012-02-14T19:53:07.654321-01:00') west = timeutils.parse_isotime('2012-02-14T21:53:07.655+01:00') self.assertTrue(east < west) self.assertTrue(east < zulu) self.assertTrue(zulu < west) def test_zulu_roundtrip(self): time_str = '2012-02-14T20:53:07Z' zulu = timeutils.parse_isotime(time_str) self.assertEqual(zulu.tzinfo, iso8601.iso8601.UTC) self.assertEqual(timeutils.isotime(zulu), time_str) def test_east_roundtrip(self): time_str = '2012-02-14T20:53:07-07:00' east = timeutils.parse_isotime(time_str) self.assertEqual(east.tzinfo.tzname(None), '-07:00') self.assertEqual(timeutils.isotime(east), time_str) def test_west_roundtrip(self): time_str = '2012-02-14T20:53:07+11:30' west = timeutils.parse_isotime(time_str) self.assertEqual(west.tzinfo.tzname(None), '+11:30') 
self.assertEqual(timeutils.isotime(west), time_str) def test_now_roundtrip(self): time_str = timeutils.isotime() now = timeutils.parse_isotime(time_str) self.assertEqual(now.tzinfo, iso8601.iso8601.UTC) self.assertEqual(timeutils.isotime(now), time_str) def test_zulu_normalize(self): time_str = '2012-02-14T20:53:07Z' zulu = timeutils.parse_isotime(time_str) normed = timeutils.normalize_time(zulu) self._instaneous(normed, 2012, 2, 14, 20, 53, 7, 0) def test_east_normalize(self): time_str = '2012-02-14T20:53:07-07:00' east = timeutils.parse_isotime(time_str) normed = timeutils.normalize_time(east) self._instaneous(normed, 2012, 2, 15, 3, 53, 7, 0) def test_west_normalize(self): time_str = '2012-02-14T20:53:07+21:00' west = timeutils.parse_isotime(time_str) normed = timeutils.normalize_time(west) self._instaneous(normed, 2012, 2, 13, 23, 53, 7, 0) def test_normalize_aware_to_naive(self): dt = datetime.datetime(2011, 2, 14, 20, 53, 7) time_str = '2011-02-14T20:53:07+21:00' aware = timeutils.parse_isotime(time_str) naive = timeutils.normalize_time(aware) self.assertTrue(naive < dt) def test_normalize_zulu_aware_to_naive(self): dt = datetime.datetime(2011, 2, 14, 20, 53, 7) time_str = '2011-02-14T19:53:07Z' aware = timeutils.parse_isotime(time_str) naive = timeutils.normalize_time(aware) self.assertTrue(naive < dt) def test_normalize_naive(self): dt = datetime.datetime(2011, 2, 14, 20, 53, 7) dtn = datetime.datetime(2011, 2, 14, 19, 53, 7) naive = timeutils.normalize_time(dtn) self.assertTrue(naive < dt) glance-12.0.0/glance/tests/unit/common/test_wsgi_ipv6.py0000664000567000056710000000525612701407047024366 0ustar jenkinsjenkins00000000000000# Copyright 2010-2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import subprocess from glance.tests import utils as test_utils script = """ import os import sys # Spoof module installed sys.modules['%s'] = object %s os.environ['EVENTLET_NO_GREENDNS'] = '%s' if 'eventlet' %s in sys.modules: sys.exit(2) try: import glance.cmd except ImportError: sys.exit(%d) else: sys.exit(%d) """ eventlet_no_dns = script % ('fake', 'import eventlet', 'foo', 'not', 1, 0) no_eventlet_no_dns = script % ('fake', '', 'foo', '', 1, 0) no_eventlet_no_greendns = script % ('dns', '', 'yes', '', 1, 0) eventlet_no_greendns = script % ('dns', 'import eventlet', 'yes', 'not', 1, 0) no_eventlet_greendns = script % ('dns', '', 'no', '', 1, 0) eventlet_greendns = script % ('dns', 'import eventlet', 'no', 'not', 0, 1) class IPv6ServerTest(test_utils.BaseTestCase): def test_no_evnetlet_no_dnspython(self): """Test eventlet not imported and dnspython not installed""" rc = subprocess.call(['python', '-c', no_eventlet_no_dns]) self.assertEqual(0, rc) def test_evnetlet_no_dnspython(self): """Test eventlet pre-imported but dnspython not installed""" rc = subprocess.call(['python', '-c', eventlet_no_dns]) self.assertEqual(0, rc) def test_no_eventlet_no_greendns(self): """Test eventlet not imported with EVENTLET_NO_GREENDNS='yes'""" rc = subprocess.call(['python', '-c', no_eventlet_no_greendns]) self.assertEqual(0, rc) def test_eventlet_no_greendns(self): """Test eventlet pre-imported with EVENTLET_NO_GREENDNS='yes'""" rc = subprocess.call(['python', '-c', eventlet_no_greendns]) self.assertEqual(0, rc) def test_no_eventlet_w_greendns(self): """Test eventlet not imported 
with EVENTLET_NO_GREENDNS='no'""" rc = subprocess.call(['python', '-c', no_eventlet_greendns]) self.assertEqual(0, rc) def test_eventlet_w_greendns(self): """Test eventlet pre-imported with EVENTLET_NO_GREENDNS='no'""" rc = subprocess.call(['python', '-c', eventlet_greendns]) self.assertEqual(0, rc) glance-12.0.0/glance/tests/unit/common/test_signature_utils.py0000664000567000056710000007653412701407047025701 0ustar jenkinsjenkins00000000000000# Copyright (c) The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import datetime import mock import unittest from cryptography import exceptions as crypto_exception from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import dsa from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from debtcollector import removals from glance.common import exception from glance.common import signature_utils from glance.tests import utils as test_utils TEST_RSA_PRIVATE_KEY = rsa.generate_private_key(public_exponent=3, key_size=1024, backend=default_backend()) TEST_DSA_PRIVATE_KEY = dsa.generate_private_key(key_size=3072, backend=default_backend()) # secp521r1 is assumed to be available on all supported platforms TEST_ECC_PRIVATE_KEY = ec.generate_private_key(ec.SECP521R1(), default_backend()) # Required image property names (SIGNATURE, HASH_METHOD, KEY_TYPE, CERT_UUID) = ( signature_utils.SIGNATURE, signature_utils.HASH_METHOD, signature_utils.KEY_TYPE, signature_utils.CERT_UUID ) # Required image property names # TODO(bpoulos): remove when 'sign-the-hash' approach is no longer supported (OLD_SIGNATURE, OLD_HASH_METHOD, OLD_KEY_TYPE, OLD_CERT_UUID) = ( signature_utils.OLD_SIGNATURE, signature_utils.OLD_HASH_METHOD, signature_utils.OLD_KEY_TYPE, signature_utils.OLD_CERT_UUID ) # Optional image property names for RSA-PSS (MASK_GEN_ALG, PSS_SALT_LENGTH) = ( signature_utils.MASK_GEN_ALG, signature_utils.PSS_SALT_LENGTH ) class FakeKeyManager(object): def __init__(self): self.certs = {'invalid_format_cert': FakeCastellanCertificate('A' * 256, 'BLAH'), 'valid_format_cert': FakeCastellanCertificate('A' * 256, 'X.509')} def get(self, context, cert_uuid): cert = self.certs.get(cert_uuid) if cert is None: raise Exception("No matching certificate found.") return cert class FakeCastellanCertificate(object): def 
__init__(self, data, cert_format): self.data = data self.cert_format = cert_format @property def format(self): return self.cert_format def get_encoded(self): return self.data class FakeCryptoCertificate(object): def __init__(self, pub_key=TEST_RSA_PRIVATE_KEY.public_key(), not_valid_before=(datetime.datetime.utcnow() - datetime.timedelta(hours=1)), not_valid_after=(datetime.datetime.utcnow() + datetime.timedelta(hours=1))): self.pub_key = pub_key self.cert_not_valid_before = not_valid_before self.cert_not_valid_after = not_valid_after def public_key(self): return self.pub_key @property def not_valid_before(self): return self.cert_not_valid_before @property def not_valid_after(self): return self.cert_not_valid_after class BadPublicKey(object): def verifier(self, signature, padding, hash_method): return None class TestSignatureUtils(test_utils.BaseTestCase): """Test methods of signature_utils""" @removals.remove(message="This will be removed in the N cycle.") def test_old_should_verify_signature(self): image_props = {OLD_CERT_UUID: 'OLD_CERT_UUID', OLD_HASH_METHOD: 'OLD_HASH_METHOD', OLD_SIGNATURE: 'OLD_SIGNATURE', OLD_KEY_TYPE: 'SIG_KEY_TYPE'} self.assertTrue(signature_utils.should_verify_signature(image_props)) @removals.remove(message="This will be removed in the N cycle.") def test_old_should_verify_signature_fail(self): bad_image_properties = [{OLD_CERT_UUID: 'OLD_CERT_UUID', OLD_HASH_METHOD: 'OLD_HASH_METHOD', OLD_SIGNATURE: 'OLD_SIGNATURE'}, {OLD_CERT_UUID: 'OLD_CERT_UUID', OLD_HASH_METHOD: 'OLD_HASH_METHOD', OLD_KEY_TYPE: 'SIG_KEY_TYPE'}, {OLD_CERT_UUID: 'OLD_CERT_UUID', OLD_SIGNATURE: 'OLD_SIGNATURE', OLD_KEY_TYPE: 'SIG_KEY_TYPE'}, {OLD_HASH_METHOD: 'OLD_HASH_METHOD', OLD_SIGNATURE: 'OLD_SIGNATURE', OLD_KEY_TYPE: 'SIG_KEY_TYPE'}] for bad_props in bad_image_properties: result = signature_utils.should_verify_signature(bad_props) self.assertFalse(result) @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported 
by backend") @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_PSS(self, mock_get_pub_key): checksum_hash = b'224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() for hash_name, hash_alg in signature_utils.HASH_METHODS.items(): signer = TEST_RSA_PRIVATE_KEY.signer( padding.PSS( mgf=padding.MGF1(hash_alg), salt_length=padding.PSS.MAX_LENGTH ), hash_alg ) signer.update(checksum_hash) signature = base64.b64encode(signer.finalize()) image_props = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: hash_name, OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'MGF1', OLD_SIGNATURE: signature} self.assertTrue(signature_utils.verify_signature(None, checksum_hash, image_props)) @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported by backend") @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_custom_PSS_salt(self, mock_get_pub_key): checksum_hash = b'224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() custom_salt_length = 32 for hash_name, hash_alg in signature_utils.HASH_METHODS.items(): signer = TEST_RSA_PRIVATE_KEY.signer( padding.PSS( mgf=padding.MGF1(hash_alg), salt_length=custom_salt_length ), hash_alg ) signer.update(checksum_hash) signature = base64.b64encode(signer.finalize()) image_props = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: hash_name, OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'MGF1', PSS_SALT_LENGTH: custom_salt_length, OLD_SIGNATURE: signature} self.assertTrue(signature_utils.verify_signature(None, checksum_hash, image_props)) @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported by backend") 
@removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_bad_signature(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'MGF1', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Signature verification failed.', signature_utils.verify_signature, None, checksum_hash, image_properties) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.should_verify_signature') def test_old_verify_signature_invalid_image_props(self, mock_should): mock_should.return_value = False self.assertRaisesRegex(exception.SignatureVerificationError, 'Required image properties for signature' ' verification do not exist. 
Cannot verify' ' signature.', signature_utils.verify_signature, None, None, None) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_bad_sig_key_type(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'BLAH', MASK_GEN_ALG: 'MGF1', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid signature key type: .*', signature_utils.verify_signature, None, checksum_hash, image_properties) @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported by backend") @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_RSA_no_mask_gen(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Signature verification failed.', signature_utils.verify_signature, None, checksum_hash, image_properties) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_RSA_bad_mask_gen(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'BLAH', OLD_SIGNATURE: 'BLAH'} 
self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid mask_gen_algorithm: .*', signature_utils.verify_signature, None, checksum_hash, image_properties) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_bad_pss_salt(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'MGF1', PSS_SALT_LENGTH: 'BLAH', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid pss_salt_length: .*', signature_utils.verify_signature, None, checksum_hash, image_properties) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_verifier_none(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = BadPublicKey() image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', MASK_GEN_ALG: 'MGF1', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Error occurred while verifying' ' the signature', signature_utils.verify_signature, None, checksum_hash, image_properties) @removals.remove(message="This will be removed in the N cycle.") @mock.patch('glance.common.signature_utils.get_public_key') def test_old_verify_signature_unsupported_algorithm(self, mock_get_pub_key): checksum_hash = '224626ae19824466f2a7f39ab7b80f7f' public_key = TEST_RSA_PRIVATE_KEY.public_key() public_key.verifier = mock.MagicMock( side_effect=crypto_exception.UnsupportedAlgorithm( "When OpenSSL is older than 1.0.1 then only SHA1 is " "supported with MGF1.", 
crypto_exception._Reasons.UNSUPPORTED_HASH)) mock_get_pub_key.return_value = public_key image_properties = {OLD_CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', OLD_HASH_METHOD: 'SHA-256', OLD_KEY_TYPE: 'RSA-PSS', OLD_SIGNATURE: 'BLAH'} self.assertRaisesRegex(exception.SignatureVerificationError, 'Unable to verify signature since the ' 'algorithm is unsupported on this system', signature_utils.verify_signature, None, checksum_hash, image_properties) def test_should_create_verifier(self): image_props = {CERT_UUID: 'CERT_UUID', HASH_METHOD: 'HASH_METHOD', SIGNATURE: 'SIGNATURE', KEY_TYPE: 'SIG_KEY_TYPE'} self.assertTrue(signature_utils.should_create_verifier(image_props)) def test_should_create_verifier_fail(self): bad_image_properties = [{CERT_UUID: 'CERT_UUID', HASH_METHOD: 'HASH_METHOD', SIGNATURE: 'SIGNATURE'}, {CERT_UUID: 'CERT_UUID', HASH_METHOD: 'HASH_METHOD', KEY_TYPE: 'SIG_KEY_TYPE'}, {CERT_UUID: 'CERT_UUID', SIGNATURE: 'SIGNATURE', KEY_TYPE: 'SIG_KEY_TYPE'}, {HASH_METHOD: 'HASH_METHOD', SIGNATURE: 'SIGNATURE', KEY_TYPE: 'SIG_KEY_TYPE'}] for bad_props in bad_image_properties: result = signature_utils.should_create_verifier(bad_props) self.assertFalse(result) @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported by backend") @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_PSS(self, mock_get_pub_key): data = b'224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() for hash_name, hash_alg in signature_utils.HASH_METHODS.items(): signer = TEST_RSA_PRIVATE_KEY.signer( padding.PSS( mgf=padding.MGF1(hash_alg), salt_length=padding.PSS.MAX_LENGTH ), hash_alg ) signer.update(data) signature = base64.b64encode(signer.finalize()) image_props = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: hash_name, KEY_TYPE: 'RSA-PSS', SIGNATURE: signature} verifier = signature_utils.get_verifier(None, image_props) 
verifier.update(data) verifier.verify() @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_ECC(self, mock_get_pub_key): data = b'224626ae19824466f2a7f39ab7b80f7f' # test every ECC curve for curve in signature_utils.ECC_CURVES: key_type_name = 'ECC_' + curve.name.upper() try: signature_utils.SignatureKeyType.lookup(key_type_name) except exception.SignatureVerificationError: import warnings warnings.warn("ECC curve '%s' not supported" % curve.name) continue # Create a private key to use private_key = ec.generate_private_key(curve, default_backend()) mock_get_pub_key.return_value = private_key.public_key() for hash_name, hash_alg in signature_utils.HASH_METHODS.items(): signer = private_key.signer( ec.ECDSA(hash_alg) ) signer.update(data) signature = base64.b64encode(signer.finalize()) image_props = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: hash_name, KEY_TYPE: key_type_name, SIGNATURE: signature} verifier = signature_utils.get_verifier(None, image_props) verifier.update(data) verifier.verify() @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_DSA(self, mock_get_pub_key): data = b'224626ae19824466f2a7f39ab7b80f7f' mock_get_pub_key.return_value = TEST_DSA_PRIVATE_KEY.public_key() for hash_name, hash_alg in signature_utils.HASH_METHODS.items(): signer = TEST_DSA_PRIVATE_KEY.signer( hash_alg ) signer.update(data) signature = base64.b64encode(signer.finalize()) image_props = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: hash_name, KEY_TYPE: 'DSA', SIGNATURE: signature} verifier = signature_utils.get_verifier(None, image_props) verifier.update(data) verifier.verify() @unittest.skipIf(not default_backend().hash_supported(hashes.SHA256()), "SHA-2 hash algorithms not supported by backend") @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_bad_signature(self, mock_get_pub_key): data = b'224626ae19824466f2a7f39ab7b80f7f' 
mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: 'SHA-256', KEY_TYPE: 'RSA-PSS', SIGNATURE: 'BLAH'} verifier = signature_utils.get_verifier(None, image_properties) verifier.update(data) self.assertRaises(crypto_exception.InvalidSignature, verifier.verify) @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_unsupported_algorithm(self, mock_get_pub_key): public_key = TEST_RSA_PRIVATE_KEY.public_key() public_key.verifier = mock.MagicMock( side_effect=crypto_exception.UnsupportedAlgorithm( "When OpenSSL is older than 1.0.1 then only SHA1 is " "supported with MGF1.", crypto_exception._Reasons.UNSUPPORTED_HASH)) mock_get_pub_key.return_value = public_key image_properties = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: 'SHA-256', KEY_TYPE: 'RSA-PSS', SIGNATURE: 'BLAH'} self.assertRaisesRegexp(exception.SignatureVerificationError, 'Unable to verify signature since the ' 'algorithm is unsupported on this system', signature_utils.get_verifier, None, image_properties) @mock.patch('glance.common.signature_utils.should_create_verifier') def test_verify_signature_invalid_image_props(self, mock_should): mock_should.return_value = False self.assertRaisesRegexp(exception.SignatureVerificationError, 'Required image properties for signature' ' verification do not exist. 
Cannot verify' ' signature.', signature_utils.get_verifier, None, None) @mock.patch('glance.common.signature_utils.get_public_key') def test_verify_signature_bad_sig_key_type(self, mock_get_pub_key): mock_get_pub_key.return_value = TEST_RSA_PRIVATE_KEY.public_key() image_properties = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: 'SHA-256', KEY_TYPE: 'BLAH', SIGNATURE: 'BLAH'} self.assertRaisesRegexp(exception.SignatureVerificationError, 'Invalid signature key type: .*', signature_utils.get_verifier, None, image_properties) @mock.patch('glance.common.signature_utils.get_public_key') def test_get_verifier_none(self, mock_get_pub_key): mock_get_pub_key.return_value = BadPublicKey() image_properties = {CERT_UUID: 'fea14bc2-d75f-4ba5-bccc-b5c924ad0693', HASH_METHOD: 'SHA-256', KEY_TYPE: 'RSA-PSS', SIGNATURE: 'BLAH'} self.assertRaisesRegexp(exception.SignatureVerificationError, 'Error occurred while creating' ' the verifier', signature_utils.get_verifier, None, image_properties) def test_get_signature(self): signature = b'A' * 256 data = base64.b64encode(signature) self.assertEqual(signature, signature_utils.get_signature(data)) def test_get_signature_fail(self): self.assertRaisesRegex(exception.SignatureVerificationError, 'The signature data was not properly' ' encoded using base64', signature_utils.get_signature, '///') def test_get_hash_method(self): hash_dict = signature_utils.HASH_METHODS for hash_name in hash_dict.keys(): hash_class = signature_utils.get_hash_method(hash_name).__class__ self.assertIsInstance(hash_dict[hash_name], hash_class) def test_get_hash_method_fail(self): self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid signature hash method: .*', signature_utils.get_hash_method, 'SHA-2') def test_get_signature_key_type_lookup(self): for sig_format in ['RSA-PSS', 'ECC_SECT571K1']: sig_key_type = signature_utils.SignatureKeyType.lookup(sig_format) self.assertIsInstance(sig_key_type, signature_utils.SignatureKeyType) 
self.assertEqual(sig_format, sig_key_type.name) def test_signature_key_type_lookup_fail(self): self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid signature key type: .*', signature_utils.SignatureKeyType.lookup, 'RSB-PSS') @mock.patch('glance.common.signature_utils.get_certificate') def test_get_public_key_rsa(self, mock_get_cert): fake_cert = FakeCryptoCertificate() mock_get_cert.return_value = fake_cert sig_key_type = signature_utils.SignatureKeyType.lookup('RSA-PSS') result_pub_key = signature_utils.get_public_key(None, None, sig_key_type) self.assertEqual(fake_cert.public_key(), result_pub_key) @mock.patch('glance.common.signature_utils.get_certificate') def test_get_public_key_ecc(self, mock_get_cert): fake_cert = FakeCryptoCertificate(TEST_ECC_PRIVATE_KEY.public_key()) mock_get_cert.return_value = fake_cert sig_key_type = signature_utils.SignatureKeyType.lookup('ECC_SECP521R1') result_pub_key = signature_utils.get_public_key(None, None, sig_key_type) self.assertEqual(fake_cert.public_key(), result_pub_key) @mock.patch('glance.common.signature_utils.get_certificate') def test_get_public_key_dsa(self, mock_get_cert): fake_cert = FakeCryptoCertificate(TEST_DSA_PRIVATE_KEY.public_key()) mock_get_cert.return_value = fake_cert sig_key_type = signature_utils.SignatureKeyType.lookup('DSA') result_pub_key = signature_utils.get_public_key(None, None, sig_key_type) self.assertEqual(fake_cert.public_key(), result_pub_key) @mock.patch('glance.common.signature_utils.get_certificate') def test_get_public_key_invalid_key(self, mock_get_certificate): bad_pub_key = 'A' * 256 mock_get_certificate.return_value = FakeCryptoCertificate(bad_pub_key) sig_key_type = signature_utils.SignatureKeyType.lookup('RSA-PSS') self.assertRaisesRegex(exception.SignatureVerificationError, 'Invalid public key type for ' 'signature key type: .*', signature_utils.get_public_key, None, None, sig_key_type) @mock.patch('cryptography.x509.load_der_x509_certificate') 
@mock.patch('castellan.key_manager.API', return_value=FakeKeyManager()) def test_get_certificate(self, mock_key_manager_API, mock_load_cert): cert_uuid = 'valid_format_cert' x509_cert = FakeCryptoCertificate() mock_load_cert.return_value = x509_cert self.assertEqual(x509_cert, signature_utils.get_certificate(None, cert_uuid)) @mock.patch('cryptography.x509.load_der_x509_certificate') @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager()) def test_get_expired_certificate(self, mock_key_manager_API, mock_load_cert): cert_uuid = 'valid_format_cert' x509_cert = FakeCryptoCertificate( not_valid_after=datetime.datetime.utcnow() - datetime.timedelta(hours=1)) mock_load_cert.return_value = x509_cert self.assertRaisesRegex(exception.SignatureVerificationError, 'Certificate is not valid after: .*', signature_utils.get_certificate, None, cert_uuid) @mock.patch('cryptography.x509.load_der_x509_certificate') @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager()) def test_get_not_yet_valid_certificate(self, mock_key_manager_API, mock_load_cert): cert_uuid = 'valid_format_cert' x509_cert = FakeCryptoCertificate( not_valid_before=datetime.datetime.utcnow() + datetime.timedelta(hours=1)) mock_load_cert.return_value = x509_cert self.assertRaisesRegex(exception.SignatureVerificationError, 'Certificate is not valid before: .*', signature_utils.get_certificate, None, cert_uuid) @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager()) def test_get_certificate_key_manager_fail(self, mock_key_manager_API): bad_cert_uuid = 'fea14bc2-d75f-4ba5-bccc-b5c924ad0695' self.assertRaisesRegex(exception.SignatureVerificationError, 'Unable to retrieve certificate with ID: .*', signature_utils.get_certificate, None, bad_cert_uuid) @mock.patch('castellan.key_manager.API', return_value=FakeKeyManager()) def test_get_certificate_invalid_format(self, mock_API): cert_uuid = 'invalid_format_cert' self.assertRaisesRegex(exception.SignatureVerificationError, 
'Invalid certificate format: .*', signature_utils.get_certificate, None, cert_uuid) glance-12.0.0/glance/tests/unit/common/test_rpc.py0000664000567000056710000003052412701407047023231 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import encodeutils import routes import six import webob from glance.common import exception from glance.common import rpc from glance.common import wsgi from glance.tests.unit import base from glance.tests import utils as test_utils CONF = cfg.CONF class FakeResource(object): """ Fake resource defining some methods that will be called later by the api. 
""" def get_images(self, context, keyword=None): return keyword def count_images(self, context, images): return len(images) def get_all_images(self, context): return False def raise_value_error(self, context): raise ValueError("Yep, Just like that!") def raise_weird_error(self, context): class WeirdError(Exception): pass raise WeirdError("Weirdness") def create_api(): deserializer = rpc.RPCJSONDeserializer() serializer = rpc.RPCJSONSerializer() controller = rpc.Controller() controller.register(FakeResource()) res = wsgi.Resource(controller, deserializer, serializer) mapper = routes.Mapper() mapper.connect("/rpc", controller=res, conditions=dict(method=["POST"]), action="__call__") return test_utils.FakeAuthMiddleware(wsgi.Router(mapper), is_admin=True) class TestRPCController(base.IsolatedUnitTest): def setUp(self): super(TestRPCController, self).setUp() self.res = FakeResource() self.controller = rpc.Controller() self.controller.register(self.res) def test_register(self): res = FakeResource() controller = rpc.Controller() controller.register(res) self.assertIn("get_images", controller._registered) self.assertIn("get_all_images", controller._registered) def test_reigster_filtered(self): res = FakeResource() controller = rpc.Controller() controller.register(res, filtered=["get_all_images"]) self.assertIn("get_all_images", controller._registered) def test_reigster_excluded(self): res = FakeResource() controller = rpc.Controller() controller.register(res, excluded=["get_all_images"]) self.assertIn("get_images", controller._registered) def test_reigster_refiner(self): res = FakeResource() controller = rpc.Controller() # Not callable self.assertRaises(TypeError, controller.register, res, refiner="get_all_images") # Filter returns False controller.register(res, refiner=lambda x: False) self.assertNotIn("get_images", controller._registered) self.assertNotIn("get_images", controller._registered) # Filter returns True controller.register(res, refiner=lambda x: True) 
self.assertIn("get_images", controller._registered) self.assertIn("get_images", controller._registered) def test_request(self): api = create_api() req = webob.Request.blank('/rpc') req.method = 'POST' req.body = jsonutils.dump_as_bytes([ { "command": "get_images", "kwargs": {"keyword": 1} } ]) res = req.get_response(api) returned = jsonutils.loads(res.body) self.assertIsInstance(returned, list) self.assertEqual(1, returned[0]) def test_request_exc(self): api = create_api() req = webob.Request.blank('/rpc') req.method = 'POST' req.body = jsonutils.dump_as_bytes([ { "command": "get_all_images", "kwargs": {"keyword": 1} } ]) # Sending non-accepted keyword # to get_all_images method res = req.get_response(api) returned = jsonutils.loads(res.body) self.assertIn("_error", returned[0]) def test_rpc_errors(self): api = create_api() req = webob.Request.blank('/rpc') req.method = 'POST' req.content_type = 'application/json' # Body is not a list, it should fail req.body = jsonutils.dump_as_bytes({}) res = req.get_response(api) self.assertEqual(400, res.status_int) # cmd is not dict, it should fail. req.body = jsonutils.dump_as_bytes([None]) res = req.get_response(api) self.assertEqual(400, res.status_int) # No command key, it should fail. req.body = jsonutils.dump_as_bytes([{}]) res = req.get_response(api) self.assertEqual(400, res.status_int) # kwargs not dict, it should fail. req.body = jsonutils.dump_as_bytes([{"command": "test", "kwargs": 2}]) res = req.get_response(api) self.assertEqual(400, res.status_int) # Command does not exist, it should fail. 
req.body = jsonutils.dump_as_bytes([{"command": "test"}]) res = req.get_response(api) self.assertEqual(404, res.status_int) def test_rpc_exception_propagation(self): api = create_api() req = webob.Request.blank('/rpc') req.method = 'POST' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes([{"command": "raise_value_error"}]) res = req.get_response(api) self.assertEqual(200, res.status_int) returned = jsonutils.loads(res.body)[0] err_cls = 'builtins.ValueError' if six.PY3 else 'exceptions.ValueError' self.assertEqual(err_cls, returned['_error']['cls']) req.body = jsonutils.dump_as_bytes([{"command": "raise_weird_error"}]) res = req.get_response(api) self.assertEqual(200, res.status_int) returned = jsonutils.loads(res.body)[0] self.assertEqual('glance.common.exception.RPCError', returned['_error']['cls']) class TestRPCClient(base.IsolatedUnitTest): def setUp(self): super(TestRPCClient, self).setUp() self.api = create_api() self.client = rpc.RPCClient(host="http://127.0.0.1:9191") self.client._do_request = self.fake_request def fake_request(self, method, url, body, headers): req = webob.Request.blank(url.path) body = encodeutils.to_utf8(body) req.body = body req.method = method webob_res = req.get_response(self.api) return test_utils.FakeHTTPResponse(status=webob_res.status_int, headers=webob_res.headers, data=webob_res.body) def test_method_proxy(self): proxy = self.client.some_method self.assertIn("method_proxy", str(proxy)) def test_bulk_request(self): commands = [{"command": "get_images", 'kwargs': {'keyword': True}}, {"command": "get_all_images"}] res = self.client.bulk_request(commands) self.assertEqual(2, len(res)) self.assertTrue(res[0]) self.assertFalse(res[1]) def test_exception_raise(self): try: self.client.raise_value_error() self.fail("Exception not raised") except ValueError as exc: self.assertEqual("Yep, Just like that!", str(exc)) def test_rpc_exception(self): try: self.client.raise_weird_error() self.fail("Exception not raised") 
except exception.RPCError: pass def test_non_str_or_dict_response(self): rst = self.client.count_images(images=[1, 2, 3, 4]) self.assertEqual(4, rst) self.assertIsInstance(rst, int) class TestRPCJSONSerializer(test_utils.BaseTestCase): def test_to_json(self): fixture = {"key": "value"} expected = b'{"key": "value"}' actual = rpc.RPCJSONSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_date_format_value(self): fixture = {"date": datetime.datetime(1900, 3, 8, 2)} expected = {"date": {"_value": "1900-03-08T02:00:00", "_type": "datetime"}} actual = rpc.RPCJSONSerializer().to_json(fixture) actual = jsonutils.loads(actual) for k in expected['date']: self.assertEqual(expected['date'][k], actual['date'][k]) def test_to_json_with_more_deep_format(self): fixture = {"is_public": True, "name": [{"name1": "test"}]} expected = {"is_public": True, "name": [{"name1": "test"}]} actual = rpc.RPCJSONSerializer().to_json(fixture) actual = wsgi.JSONResponseSerializer().to_json(fixture) actual = jsonutils.loads(actual) for k in expected: self.assertEqual(expected[k], actual[k]) def test_default(self): fixture = {"key": "value"} response = webob.Response() rpc.RPCJSONSerializer().default(response, fixture) self.assertEqual(200, response.status_int) content_types = [h for h in response.headerlist if h[0] == 'Content-Type'] self.assertEqual(1, len(content_types)) self.assertEqual('application/json', response.content_type) self.assertEqual(b'{"key": "value"}', response.body) class TestRPCJSONDeserializer(test_utils.BaseTestCase): def test_has_body_no_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers.pop('Content-Length') self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) def test_has_body_zero_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers['Content-Length'] = 0 
self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) def test_has_body_has_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' self.assertIn('Content-Length', request.headers) self.assertTrue(rpc.RPCJSONDeserializer().has_body(request)) def test_no_body_no_content_length(self): request = wsgi.Request.blank('/') self.assertFalse(rpc.RPCJSONDeserializer().has_body(request)) def test_from_json(self): fixture = '{"key": "value"}' expected = {"key": "value"} actual = rpc.RPCJSONDeserializer().from_json(fixture) self.assertEqual(expected, actual) def test_from_json_malformed(self): fixture = 'kjasdklfjsklajf' self.assertRaises(webob.exc.HTTPBadRequest, rpc.RPCJSONDeserializer().from_json, fixture) def test_default_no_body(self): request = wsgi.Request.blank('/') actual = rpc.RPCJSONDeserializer().default(request) expected = {} self.assertEqual(expected, actual) def test_default_with_body(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'{"key": "value"}' actual = rpc.RPCJSONDeserializer().default(request) expected = {"body": {"key": "value"}} self.assertEqual(expected, actual) def test_has_body_has_transfer_encoding(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'fake_body' request.headers['transfer-encoding'] = '' self.assertIn('transfer-encoding', request.headers) self.assertTrue(rpc.RPCJSONDeserializer().has_body(request)) def test_to_json_with_date_format_value(self): fixture = ('{"date": {"_value": "1900-03-08T02:00:00.000000",' '"_type": "datetime"}}') expected = {"date": datetime.datetime(1900, 3, 8, 2)} actual = rpc.RPCJSONDeserializer().from_json(fixture) self.assertEqual(expected, actual) glance-12.0.0/glance/tests/unit/common/__init__.py0000664000567000056710000000000012701407047023127 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/test_property_utils.py0000664000567000056710000005757012701407047025563 0ustar 
jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.api import policy from glance.common import exception from glance.common import property_utils import glance.context from glance.tests.unit import base CONFIG_SECTIONS = [ '^x_owner_.*', 'spl_create_prop', 'spl_read_prop', 'spl_read_only_prop', 'spl_update_prop', 'spl_update_only_prop', 'spl_delete_prop', 'spl_delete_empty_prop', '^x_all_permitted.*', '^x_none_permitted.*', 'x_none_read', 'x_none_update', 'x_none_delete', 'x_case_insensitive', 'x_foo_matcher', 'x_foo_*', '.*' ] def create_context(policy, roles=None): if roles is None: roles = [] return glance.context.RequestContext(roles=roles, policy_enforcer=policy) class TestPropertyRulesWithRoles(base.IsolatedUnitTest): def setUp(self): super(TestPropertyRulesWithRoles, self).setUp() self.set_property_protections() self.policy = policy.Enforcer() def tearDown(self): super(TestPropertyRulesWithRoles, self).tearDown() def test_is_property_protections_enabled_true(self): self.config(property_protection_file="property-protections.conf") self.assertTrue(property_utils.is_property_protection_enabled()) def test_is_property_protections_enabled_false(self): self.config(property_protection_file=None) self.assertFalse(property_utils.is_property_protection_enabled()) def 
test_property_protection_file_doesnt_exist(self): self.config(property_protection_file='fake-file.conf') self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_mutually_exclusive_rule(self): exclusive_rules = {'.*': {'create': ['@', '!'], 'read': ['fake-role'], 'update': ['fake-role'], 'delete': ['fake-role']}} self.set_property_protection_rules(exclusive_rules) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_malformed_rule(self): malformed_rules = {'^[0-9)': {'create': ['fake-role'], 'read': ['fake-role'], 'update': ['fake-role'], 'delete': ['fake-role']}} self.set_property_protection_rules(malformed_rules) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_missing_operation(self): rules_with_missing_operation = {'^[0-9]': {'create': ['fake-role'], 'update': ['fake-role'], 'delete': ['fake-role']}} self.set_property_protection_rules(rules_with_missing_operation) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_misspelt_operation(self): rules_with_misspelt_operation = {'^[0-9]': {'create': ['fake-role'], 'rade': ['fake-role'], 'update': ['fake-role'], 'delete': ['fake-role']}} self.set_property_protection_rules(rules_with_misspelt_operation) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_whitespace(self): rules_whitespace = { '^test_prop.*': { 'create': ['member ,fake-role'], 'read': ['fake-role, member'], 'update': ['fake-role, member'], 'delete': ['fake-role, member'] } } self.set_property_protection_rules(rules_whitespace) self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules('test_prop_1', 'read', 
create_context(self.policy, ['member']))) self.assertTrue(self.rules_checker.check_property_rules('test_prop_1', 'read', create_context(self.policy, ['fake-role']))) def test_check_property_rules_invalid_action(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertFalse(self.rules_checker.check_property_rules('test_prop', 'hall', create_context(self.policy, ['admin']))) def test_check_property_rules_read_permitted_admin_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules('test_prop', 'read', create_context(self.policy, ['admin']))) def test_check_property_rules_read_permitted_specific_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules( 'x_owner_prop', 'read', create_context(self.policy, ['member']))) def test_check_property_rules_read_unpermitted_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertFalse(self.rules_checker.check_property_rules('test_prop', 'read', create_context(self.policy, ['member']))) def test_check_property_rules_create_permitted_admin_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules('test_prop', 'create', create_context(self.policy, ['admin']))) def test_check_property_rules_create_permitted_specific_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules( 'x_owner_prop', 'create', create_context(self.policy, ['member']))) def test_check_property_rules_create_unpermitted_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertFalse(self.rules_checker.check_property_rules('test_prop', 'create', create_context(self.policy, ['member']))) def test_check_property_rules_update_permitted_admin_role(self): self.rules_checker = 
property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules('test_prop', 'update', create_context(self.policy, ['admin']))) def test_check_property_rules_update_permitted_specific_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules( 'x_owner_prop', 'update', create_context(self.policy, ['member']))) def test_check_property_rules_update_unpermitted_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertFalse(self.rules_checker.check_property_rules('test_prop', 'update', create_context(self.policy, ['member']))) def test_check_property_rules_delete_permitted_admin_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules('test_prop', 'delete', create_context(self.policy, ['admin']))) def test_check_property_rules_delete_permitted_specific_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertTrue(self.rules_checker.check_property_rules( 'x_owner_prop', 'delete', create_context(self.policy, ['member']))) def test_check_property_rules_delete_unpermitted_role(self): self.rules_checker = property_utils.PropertyRules(self.policy) self.assertFalse(self.rules_checker.check_property_rules('test_prop', 'delete', create_context(self.policy, ['member']))) def test_property_config_loaded_in_order(self): """ Verify the order of loaded config sections matches that from the configuration file """ self.rules_checker = property_utils.PropertyRules(self.policy) self.assertEqual(CONFIG_SECTIONS, property_utils.CONFIG.sections()) def test_property_rules_loaded_in_order(self): """ Verify rules are iterable in the same order as read from the config file """ self.rules_checker = property_utils.PropertyRules(self.policy) for i in range(len(property_utils.CONFIG.sections())): self.assertEqual(property_utils.CONFIG.sections()[i], 
self.rules_checker.rules[i][0].pattern) def test_check_property_rules_create_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'create', create_context(self.policy, ['']))) def test_check_property_rules_read_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'read', create_context(self.policy, ['']))) def test_check_property_rules_update_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'update', create_context(self.policy, ['']))) def test_check_property_rules_delete_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_create_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'create', create_context(self.policy, ['']))) def test_check_property_rules_read_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'read', create_context(self.policy, ['']))) def test_check_property_rules_update_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'update', create_context(self.policy, ['']))) def test_check_property_rules_delete_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_read_none(self): self.rules_checker = property_utils.PropertyRules() 
self.assertTrue(self.rules_checker.check_property_rules( 'x_none_read', 'create', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'read', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'update', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_update_none(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'create', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'read', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_update', 'update', create_context(self.policy, ['']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'delete', create_context(self.policy, ['admin', 'member']))) def test_check_property_rules_delete_none(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'create', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'read', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'update', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_delete', 'delete', create_context(self.policy, ['']))) def test_check_return_first_match(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'create', create_context(self.policy, ['']))) 
self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'read', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'update', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'delete', create_context(self.policy, ['']))) def test_check_case_insensitive_property_rules(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_case_insensitive', 'create', create_context(self.policy, ['member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_case_insensitive', 'read', create_context(self.policy, ['member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_case_insensitive', 'update', create_context(self.policy, ['member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_case_insensitive', 'delete', create_context(self.policy, ['member']))) class TestPropertyRulesWithPolicies(base.IsolatedUnitTest): def setUp(self): super(TestPropertyRulesWithPolicies, self).setUp() self.set_property_protections(use_policies=True) self.policy = policy.Enforcer() self.rules_checker = property_utils.PropertyRules(self.policy) def tearDown(self): super(TestPropertyRulesWithPolicies, self).tearDown() def test_check_property_rules_create_permitted_specific_policy(self): self.assertTrue(self.rules_checker.check_property_rules( 'spl_creator_policy', 'create', create_context(self.policy, ['spl_role']))) def test_check_property_rules_create_unpermitted_policy(self): self.assertFalse(self.rules_checker.check_property_rules( 'spl_creator_policy', 'create', create_context(self.policy, ['fake-role']))) def test_check_property_rules_read_permitted_specific_policy(self): self.assertTrue(self.rules_checker.check_property_rules( 'spl_creator_policy', 'read', create_context(self.policy, ['spl_role']))) def test_check_property_rules_read_unpermitted_policy(self): 
self.assertFalse(self.rules_checker.check_property_rules( 'spl_creator_policy', 'read', create_context(self.policy, ['fake-role']))) def test_check_property_rules_update_permitted_specific_policy(self): self.assertTrue(self.rules_checker.check_property_rules( 'spl_creator_policy', 'update', create_context(self.policy, ['admin']))) def test_check_property_rules_update_unpermitted_policy(self): self.assertFalse(self.rules_checker.check_property_rules( 'spl_creator_policy', 'update', create_context(self.policy, ['fake-role']))) def test_check_property_rules_delete_permitted_specific_policy(self): self.assertTrue(self.rules_checker.check_property_rules( 'spl_creator_policy', 'delete', create_context(self.policy, ['admin']))) def test_check_property_rules_delete_unpermitted_policy(self): self.assertFalse(self.rules_checker.check_property_rules( 'spl_creator_policy', 'delete', create_context(self.policy, ['fake-role']))) def test_property_protection_with_malformed_rule(self): malformed_rules = {'^[0-9)': {'create': ['fake-policy'], 'read': ['fake-policy'], 'update': ['fake-policy'], 'delete': ['fake-policy']}} self.set_property_protection_rules(malformed_rules) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_property_protection_with_multiple_policies(self): malformed_rules = {'^x_.*': {'create': ['fake-policy, another_pol'], 'read': ['fake-policy'], 'update': ['fake-policy'], 'delete': ['fake-policy']}} self.set_property_protection_rules(malformed_rules) self.assertRaises(exception.InvalidPropertyProtectionConfiguration, property_utils.PropertyRules) def test_check_property_rules_create_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'create', create_context(self.policy, ['']))) def test_check_property_rules_read_all_permitted(self): self.rules_checker = property_utils.PropertyRules() 
self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'read', create_context(self.policy, ['']))) def test_check_property_rules_update_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'update', create_context(self.policy, ['']))) def test_check_property_rules_delete_all_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_all_permitted', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_create_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'create', create_context(self.policy, ['']))) def test_check_property_rules_read_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'read', create_context(self.policy, ['']))) def test_check_property_rules_update_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'update', create_context(self.policy, ['']))) def test_check_property_rules_delete_none_permitted(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_none_permitted', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_read_none(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_none_read', 'create', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'read', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'update', create_context(self.policy, ['']))) 
self.assertFalse(self.rules_checker.check_property_rules( 'x_none_read', 'delete', create_context(self.policy, ['']))) def test_check_property_rules_update_none(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'create', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'read', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_update', 'update', create_context(self.policy, ['']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_update', 'delete', create_context(self.policy, ['admin', 'member']))) def test_check_property_rules_delete_none(self): self.rules_checker = property_utils.PropertyRules() self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'create', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'read', create_context(self.policy, ['admin', 'member']))) self.assertTrue(self.rules_checker.check_property_rules( 'x_none_delete', 'update', create_context(self.policy, ['admin', 'member']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_none_delete', 'delete', create_context(self.policy, ['']))) def test_check_return_first_match(self): self.rules_checker = property_utils.PropertyRules() self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'create', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'read', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'update', create_context(self.policy, ['']))) self.assertFalse(self.rules_checker.check_property_rules( 'x_foo_matcher', 'delete', create_context(self.policy, ['']))) 
glance-12.0.0/glance/tests/unit/common/test_utils.py0000664000567000056710000004424112701407047023606 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2015 Mirantis, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile import six import webob from glance.common import exception from glance.common import utils from glance.tests import utils as test_utils class TestUtils(test_utils.BaseTestCase): """Test routines in glance.utils""" def test_cooperative_reader(self): """Ensure cooperative reader class accesses all bytes of file""" BYTES = 1024 bytes_read = 0 with tempfile.TemporaryFile('w+') as tmp_fd: tmp_fd.write('*' * BYTES) tmp_fd.seek(0) for chunk in utils.CooperativeReader(tmp_fd): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) bytes_read = 0 with tempfile.TemporaryFile('w+') as tmp_fd: tmp_fd.write('*' * BYTES) tmp_fd.seek(0) reader = utils.CooperativeReader(tmp_fd) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertEqual(BYTES, bytes_read) def test_cooperative_reader_of_iterator(self): """Ensure cooperative reader supports iterator backends too""" data = b'abcdefgh' data_list = [data[i:i + 1] * 3 for i in range(len(data))] reader = utils.CooperativeReader(data_list) chunks = [] while True: chunks.append(reader.read(3)) if chunks[-1] == b'': break meat = b''.join(chunks) self.assertEqual(b'aaabbbcccdddeeefffggghhh', meat) def 
test_cooperative_reader_of_iterator_stop_iteration_err(self): """Ensure cooperative reader supports iterator backends too""" reader = utils.CooperativeReader([l * 3 for l in '']) chunks = [] while True: chunks.append(reader.read(3)) if chunks[-1] == b'': break meat = b''.join(chunks) self.assertEqual(b'', meat) def _create_generator(self, chunk_size, max_iterations): chars = b'abc' iteration = 0 while True: index = iteration % len(chars) chunk = chars[index:index + 1] * chunk_size yield chunk iteration += 1 if iteration >= max_iterations: raise StopIteration() def _test_reader_chunked(self, chunk_size, read_size, max_iterations=5): generator = self._create_generator(chunk_size, max_iterations) reader = utils.CooperativeReader(generator) result = bytearray() while True: data = reader.read(read_size) if len(data) == 0: break self.assertLessEqual(len(data), read_size) result += data expected = (b'a' * chunk_size + b'b' * chunk_size + b'c' * chunk_size + b'a' * chunk_size + b'b' * chunk_size) self.assertEqual(expected, bytes(result)) def test_cooperative_reader_preserves_size_chunk_less_then_read(self): self._test_reader_chunked(43, 101) def test_cooperative_reader_preserves_size_chunk_equals_read(self): self._test_reader_chunked(1024, 1024) def test_cooperative_reader_preserves_size_chunk_more_then_read(self): chunk_size = 16 * 1024 * 1024 # 16 Mb, as in remote http source read_size = 8 * 1024 # 8k, as in httplib self._test_reader_chunked(chunk_size, read_size) def test_limiting_reader(self): """Ensure limiting reader class accesses all bytes of file""" BYTES = 1024 bytes_read = 0 data = six.StringIO("*" * BYTES) for chunk in utils.LimitingReader(data, BYTES): bytes_read += len(chunk) self.assertEqual(BYTES, bytes_read) bytes_read = 0 data = six.StringIO("*" * BYTES) reader = utils.LimitingReader(data, BYTES) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertEqual(BYTES, bytes_read) def test_limiting_reader_fails(self): 
"""Ensure limiting reader class throws exceptions if limit exceeded""" BYTES = 1024 def _consume_all_iter(): bytes_read = 0 data = six.StringIO("*" * BYTES) for chunk in utils.LimitingReader(data, BYTES - 1): bytes_read += len(chunk) self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_iter) def _consume_all_read(): bytes_read = 0 data = six.StringIO("*" * BYTES) reader = utils.LimitingReader(data, BYTES - 1) byte = reader.read(1) while len(byte) != 0: bytes_read += 1 byte = reader.read(1) self.assertRaises(exception.ImageSizeLimitExceeded, _consume_all_read) def test_get_meta_from_headers(self): resp = webob.Response() resp.headers = {"x-image-meta-name": 'test', 'x-image-meta-virtual-size': 80} result = utils.get_image_meta_from_headers(resp) self.assertEqual({'name': 'test', 'properties': {}, 'virtual_size': 80}, result) def test_get_meta_from_headers_none_virtual_size(self): resp = webob.Response() resp.headers = {"x-image-meta-name": 'test', 'x-image-meta-virtual-size': 'None'} result = utils.get_image_meta_from_headers(resp) self.assertEqual({'name': 'test', 'properties': {}, 'virtual_size': None}, result) def test_get_meta_from_headers_bad_headers(self): resp = webob.Response() resp.headers = {"x-image-meta-bad": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, utils.get_image_meta_from_headers, resp) resp.headers = {"x-image-meta-": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, utils.get_image_meta_from_headers, resp) resp.headers = {"x-image-meta-*": 'test'} self.assertRaises(webob.exc.HTTPBadRequest, utils.get_image_meta_from_headers, resp) def test_image_meta(self): image_meta = {'x-image-meta-size': 'test'} image_meta_properties = {'properties': {'test': "test"}} actual = utils.image_meta_to_http_headers(image_meta) actual_test2 = utils.image_meta_to_http_headers( image_meta_properties) self.assertEqual({'x-image-meta-x-image-meta-size': u'test'}, actual) self.assertEqual({'x-image-meta-property-test': u'test'}, actual_test2) def 
test_create_mashup_dict_with_different_core_custom_properties(self): image_meta = { 'id': 'test-123', 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': True, 'updated_at': '', 'properties': {'test_key': 'test_1234'}, } mashup_dict = utils.create_mashup_dict(image_meta) self.assertNotIn('properties', mashup_dict) self.assertEqual(image_meta['properties']['test_key'], mashup_dict['test_key']) def test_create_mashup_dict_with_same_core_custom_properties(self): image_meta = { 'id': 'test-123', 'name': 'fake_image', 'status': 'active', 'created_at': '', 'min_disk': '10G', 'min_ram': '1024M', 'protected': False, 'locations': '', 'checksum': 'c1234', 'owner': '', 'disk_format': 'raw', 'container_format': 'bare', 'size': '123456789', 'virtual_size': '123456789', 'is_public': 'public', 'deleted': True, 'updated_at': '', 'properties': {'min_ram': '2048M'}, } mashup_dict = utils.create_mashup_dict(image_meta) self.assertNotIn('properties', mashup_dict) self.assertNotEqual(image_meta['properties']['min_ram'], mashup_dict['min_ram']) self.assertEqual(image_meta['min_ram'], mashup_dict['min_ram']) def test_mutating(self): class FakeContext(object): def __init__(self): self.read_only = False class Fake(object): def __init__(self): self.context = FakeContext() def fake_function(req, context): return 'test passed' req = webob.Request.blank('/some_request') result = utils.mutating(fake_function) self.assertEqual("test passed", result(req, Fake())) def test_validate_key_cert_key(self): self.config(digest_algorithm='sha256') var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../', 'var')) keyfile = os.path.join(var_dir, 'privatekey.key') certfile = os.path.join(var_dir, 'certificate.crt') 
utils.validate_key_cert(keyfile, certfile) def test_validate_key_cert_no_private_key(self): with tempfile.NamedTemporaryFile('w+') as tmpf: self.assertRaises(RuntimeError, utils.validate_key_cert, "/not/a/file", tmpf.name) def test_validate_key_cert_cert_cant_read(self): with tempfile.NamedTemporaryFile('w+') as keyf: with tempfile.NamedTemporaryFile('w+') as certf: os.chmod(certf.name, 0) self.assertRaises(RuntimeError, utils.validate_key_cert, keyf.name, certf.name) def test_validate_key_cert_key_cant_read(self): with tempfile.NamedTemporaryFile('w+') as keyf: with tempfile.NamedTemporaryFile('w+') as certf: os.chmod(keyf.name, 0) self.assertRaises(RuntimeError, utils.validate_key_cert, keyf.name, certf.name) def test_invalid_digest_algorithm(self): self.config(digest_algorithm='fake_algorithm') var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../', 'var')) keyfile = os.path.join(var_dir, 'privatekey.key') certfile = os.path.join(var_dir, 'certificate.crt') self.assertRaises(ValueError, utils.validate_key_cert, keyfile, certfile) def test_valid_hostname(self): valid_inputs = ['localhost', 'glance04-a' 'G', '528491'] for input_str in valid_inputs: self.assertTrue(utils.is_valid_hostname(input_str)) def test_valid_hostname_fail(self): invalid_inputs = ['localhost.localdomain', '192.168.0.1', u'\u2603', 'glance02.stack42.local'] for input_str in invalid_inputs: self.assertFalse(utils.is_valid_hostname(input_str)) def test_valid_fqdn(self): valid_inputs = ['localhost.localdomain', 'glance02.stack42.local' 'glance04-a.stack47.local', 'img83.glance.xn--penstack-r74e.org'] for input_str in valid_inputs: self.assertTrue(utils.is_valid_fqdn(input_str)) def test_valid_fqdn_fail(self): invalid_inputs = ['localhost', '192.168.0.1', '999.88.77.6', u'\u2603.local', 'glance02.stack42'] for input_str in invalid_inputs: self.assertFalse(utils.is_valid_fqdn(input_str)) def test_valid_host_port_string(self): valid_pairs = ['10.11.12.13:80', '172.17.17.1:65535', 
'[fe80::a:b:c:d]:9990', 'localhost:9990', 'localhost.localdomain:9990', 'glance02.stack42.local:1234', 'glance04-a.stack47.local:1234', 'img83.glance.xn--penstack-r74e.org:13080'] for pair_str in valid_pairs: host, port = utils.parse_valid_host_port(pair_str) escaped = pair_str.startswith('[') expected_host = '%s%s%s' % ('[' if escaped else '', host, ']' if escaped else '') self.assertTrue(pair_str.startswith(expected_host)) self.assertTrue(port > 0) expected_pair = '%s:%d' % (expected_host, port) self.assertEqual(expected_pair, pair_str) def test_valid_host_port_string_fail(self): invalid_pairs = ['', '10.11.12.13', '172.17.17.1:99999', '290.12.52.80:5673', 'absurd inputs happen', u'\u2601', u'\u2603:8080', 'fe80::1', '[fe80::2]', ':5673', '[fe80::a:b:c:d]9990', 'fe80:a:b:c:d:e:f:1:2:3:4', 'fe80:a:b:c:d:e:f:g', 'fe80::1:8080', '[fe80:a:b:c:d:e:f:g]:9090', '[a:b:s:u:r:d]:fe80'] for pair in invalid_pairs: self.assertRaises(ValueError, utils.parse_valid_host_port, pair) class SplitFilterOpTestCase(test_utils.BaseTestCase): def test_less_than_operator(self): expr = 'lt:bar' returned = utils.split_filter_op(expr) self.assertEqual(('lt', 'bar'), returned) def test_less_than_equal_operator(self): expr = 'lte:bar' returned = utils.split_filter_op(expr) self.assertEqual(('lte', 'bar'), returned) def test_greater_than_operator(self): expr = 'gt:bar' returned = utils.split_filter_op(expr) self.assertEqual(('gt', 'bar'), returned) def test_greater_than_equal_operator(self): expr = 'gte:bar' returned = utils.split_filter_op(expr) self.assertEqual(('gte', 'bar'), returned) def test_not_equal_operator(self): expr = 'neq:bar' returned = utils.split_filter_op(expr) self.assertEqual(('neq', 'bar'), returned) def test_equal_operator(self): expr = 'eq:bar' returned = utils.split_filter_op(expr) self.assertEqual(('eq', 'bar'), returned) def test_in_operator(self): expr = 'in:bar' returned = utils.split_filter_op(expr) self.assertEqual(('in', 'bar'), returned) def 
test_split_filter_value_for_quotes(self): expr = '\"fake\\\"name\",fakename,\"fake,name\"' returned = utils.split_filter_value_for_quotes(expr) list_values = ['fake\\"name', 'fakename', 'fake,name'] self.assertEqual(list_values, returned) def test_validate_quotes(self): expr = '\"aaa\\\"aa\",bb,\"cc\"' returned = utils.validate_quotes(expr) self.assertIsNone(returned) invalid_expr = ['\"aa', 'ss\"', 'aa\"bb\"cc', '\"aa\"\"bb\"'] for expr in invalid_expr: self.assertRaises(exception.InvalidParameterValue, utils.validate_quotes, expr) def test_default_operator(self): expr = 'bar' returned = utils.split_filter_op(expr) self.assertEqual(('eq', expr), returned) class EvaluateFilterOpTestCase(test_utils.BaseTestCase): def test_less_than_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'lt', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'lt', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'lt', 10)) def test_less_than_equal_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'lte', 10)) self.assertTrue(utils.evaluate_filter_op(10, 'lte', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'lte', 10)) def test_greater_than_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'gt', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'gt', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'gt', 10)) def test_greater_than_equal_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'gte', 10)) self.assertTrue(utils.evaluate_filter_op(10, 'gte', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'gte', 10)) def test_not_equal_operator(self): self.assertTrue(utils.evaluate_filter_op(9, 'neq', 10)) self.assertFalse(utils.evaluate_filter_op(10, 'neq', 10)) self.assertTrue(utils.evaluate_filter_op(11, 'neq', 10)) def test_equal_operator(self): self.assertFalse(utils.evaluate_filter_op(9, 'eq', 10)) self.assertTrue(utils.evaluate_filter_op(10, 'eq', 10)) self.assertFalse(utils.evaluate_filter_op(11, 'eq', 10)) def test_invalid_operator(self): 
self.assertRaises(exception.InvalidFilterOperatorValue, utils.evaluate_filter_op, '10', 'bar', '8') glance-12.0.0/glance/tests/unit/common/test_wsgi.py0000664000567000056710000006647312701407047023432 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import gettext import os import socket from babel import localedata import eventlet.patcher import fixtures import mock from oslo_concurrency import processutils from oslo_serialization import jsonutils import routes import six import webob from glance.api.v1 import router as router_v1 from glance.api.v2 import router as router_v2 from glance.common import exception from glance.common import utils from glance.common import wsgi from glance import i18n from glance.tests import utils as test_utils class RequestTest(test_utils.BaseTestCase): def _set_expected_languages(self, all_locales=None, avail_locales=None): if all_locales is None: all_locales = [] # Override localedata.locale_identifiers to return some locales. def returns_some_locales(*args, **kwargs): return all_locales self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales) # Override gettext.find to return other than None for some languages. def fake_gettext_find(lang_id, *args, **kwargs): found_ret = '/glance/%s/LC_MESSAGES/glance.mo' % lang_id if avail_locales is None: # All locales are available. 
return found_ret languages = kwargs['languages'] if languages[0] in avail_locales: return found_ret return None self.stubs.Set(gettext, 'find', fake_gettext_find) def test_content_range(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes 10-99/*' range_ = request.get_content_range() self.assertEqual(10, range_.start) self.assertEqual(100, range_.stop) # non-inclusive self.assertIsNone(range_.length) def test_content_range_invalid(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Range"] = 'bytes=0-99' self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_range) def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123') self.assertRaises(exception.InvalidContentType, request.get_content_type, ('application/xml',)) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "text/html" self.assertRaises(exception.InvalidContentType, request.get_content_type, ('application/xml',)) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type(('application/json',)) self.assertEqual("application/json", result) def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept_json(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept_xml_json(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml, application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def 
test_content_type_from_accept_json_xml_quality(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ("application/json; q=0.3, " "application/xml; q=0.9") result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_language_accept_default(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept-Language"] = "zz-ZZ,zz;q=0.8" result = request.best_match_language() self.assertIsNone(result) def test_language_accept_none(self): request = wsgi.Request.blank('/tests/123') result = request.best_match_language() self.assertIsNone(result) def test_best_match_language_expected(self): # If Accept-Language is a supported language, best_match_language() # returns it. self._set_expected_languages(all_locales=['it']) req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'}) self.assertEqual('it', req.best_match_language()) def test_request_match_language_unexpected(self): # If Accept-Language is a language we do not support, # best_match_language() returns None. 
self._set_expected_languages(all_locales=['it']) req = wsgi.Request.blank('/', headers={'Accept-Language': 'zh'}) self.assertIsNone(req.best_match_language()) @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') def test_best_match_language_unknown(self, mock_best_match): # Test that we are actually invoking language negotiation by webop request = wsgi.Request.blank('/') accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} mock_best_match.return_value = None self.assertIsNone(request.best_match_language()) # If Accept-Language is missing or empty, match should be None request.headers = {'Accept-Language': ''} self.assertIsNone(request.best_match_language()) request.headers.pop('Accept-Language') self.assertIsNone(request.best_match_language()) def test_http_error_response_codes(self): sample_id, member_id, tag_val, task_id = 'abc', '123', '1', '2' """Makes sure v1 unallowed methods return 405""" unallowed_methods = [ ('/images', ['PUT', 'DELETE', 'HEAD', 'PATCH']), ('/images/detail', ['POST', 'PUT', 'DELETE', 'PATCH']), ('/images/%s' % sample_id, ['POST', 'PATCH']), ('/images/%s/members' % sample_id, ['POST', 'DELETE', 'HEAD', 'PATCH']), ('/images/%s/members/%s' % (sample_id, member_id), ['POST', 'HEAD', 'PATCH']), ] api = test_utils.FakeAuthMiddleware(router_v1.API(routes.Mapper())) for uri, methods in unallowed_methods: for method in methods: req = webob.Request.blank(uri) req.method = method res = req.get_response(api) self.assertEqual(405, res.status_int) """Makes sure v2 unallowed methods return 405""" unallowed_methods = [ ('/schemas/image', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/images', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/member', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/members', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/task', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/schemas/tasks', ['POST', 'PUT', 'DELETE', 'PATCH', 'HEAD']), ('/images', ['PUT', 
'DELETE', 'PATCH', 'HEAD']), ('/images/%s' % sample_id, ['POST', 'PUT', 'HEAD']), ('/images/%s/file' % sample_id, ['POST', 'DELETE', 'PATCH', 'HEAD']), ('/images/%s/tags/%s' % (sample_id, tag_val), ['GET', 'POST', 'PATCH', 'HEAD']), ('/images/%s/members' % sample_id, ['PUT', 'DELETE', 'PATCH', 'HEAD']), ('/images/%s/members/%s' % (sample_id, member_id), ['POST', 'PATCH', 'HEAD']), ('/tasks', ['PUT', 'DELETE', 'PATCH', 'HEAD']), ('/tasks/%s' % task_id, ['POST', 'PUT', 'PATCH', 'HEAD']), ] api = test_utils.FakeAuthMiddleware(router_v2.API(routes.Mapper())) for uri, methods in unallowed_methods: for method in methods: req = webob.Request.blank(uri) req.method = method res = req.get_response(api) self.assertEqual(405, res.status_int) # Makes sure not implemented methods return 405 req = webob.Request.blank('/schemas/image') req.method = 'NonexistentMethod' res = req.get_response(api) self.assertEqual(405, res.status_int) class ResourceTest(test_utils.BaseTestCase): def test_get_action_args(self): env = { 'wsgiorg.routing_args': [ None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12, }, ], } expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_invalid_index(self): env = {'wsgiorg.routing_args': []} expected = {} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_controller_error(self): actions = {'format': None, 'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) self.assertEqual(expected, actual) def test_get_action_args_del_format_error(self): actions = {'action': 'update', 'id': 12} env = {'wsgiorg.routing_args': [None, actions]} expected = {'action': 'update', 'id': 12} actual = wsgi.Resource(None, None, None).get_action_args(env) 
self.assertEqual(expected, actual) def test_dispatch(self): class Controller(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_default(self): class Controller(object): def default(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) actual = resource.dispatch(Controller(), 'index', 'on', pants='off') expected = ('on', 'off') self.assertEqual(expected, actual) def test_dispatch_no_default(self): class Controller(object): def show(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(None, None, None) self.assertRaises(AttributeError, resource.dispatch, Controller(), 'index', 'on', pants='off') def test_call(self): class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) def dispatch(self, obj, action, *args, **kwargs): if isinstance(obj, wsgi.JSONRequestDeserializer): return [] if isinstance(obj, wsgi.JSONResponseSerializer): raise webob.exc.HTTPForbidden() self.stubs.Set(wsgi.Resource, 'dispatch', dispatch) request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPForbidden) self.assertEqual(403, response.status_code) def test_call_raises_exception(self): class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) resource = wsgi.Resource(FakeController(), None, None) def dispatch(self, obj, action, *args, **kwargs): raise Exception("test exception") self.stubs.Set(wsgi.Resource, 'dispatch', dispatch) request = wsgi.Request.blank('/') response = resource.__call__(request) self.assertIsInstance(response, webob.exc.HTTPInternalServerError) self.assertEqual(500, response.status_code) @mock.patch.object(wsgi, 'translate_exception') def 
test_resource_call_error_handle_localized(self, mock_translate_exception): class Controller(object): def delete(self, req, identity): raise webob.exc.HTTPBadRequest(explanation='Not Found') actions = {'action': 'delete', 'identity': 12} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) message_es = 'No Encontrado' resource = wsgi.Resource(Controller(), wsgi.JSONRequestDeserializer(), None) translated_exc = webob.exc.HTTPBadRequest(message_es) mock_translate_exception.return_value = translated_exc e = self.assertRaises(webob.exc.HTTPBadRequest, resource, request) self.assertEqual(message_es, str(e)) @mock.patch.object(webob.acceptparse.AcceptLanguage, 'best_match') @mock.patch.object(i18n, 'translate') def test_translate_exception(self, mock_translate, mock_best_match): mock_translate.return_value = 'No Encontrado' mock_best_match.return_value = 'de' req = wsgi.Request.blank('/tests/123') req.headers["Accept-Language"] = "de" e = webob.exc.HTTPNotFound(explanation='Not Found') e = wsgi.translate_exception(req, e) self.assertEqual('No Encontrado', e.explanation) def test_response_headers_encoded(self): # prepare environment for_openstack_comrades = \ u'\u0417\u0430 \u043e\u043f\u0435\u043d\u0441\u0442\u0435\u043a, ' \ u'\u0442\u043e\u0432\u0430\u0440\u0438\u0449\u0438' class FakeController(object): def index(self, shirt, pants=None): return (shirt, pants) class FakeSerializer(object): def index(self, response, result): response.headers['unicode_test'] = for_openstack_comrades # make request resource = wsgi.Resource(FakeController(), None, FakeSerializer()) actions = {'action': 'index'} env = {'wsgiorg.routing_args': [None, actions]} request = wsgi.Request.blank('/tests/123', environ=env) response = resource.__call__(request) # ensure it has been encoded correctly value = (response.headers['unicode_test'].decode('utf-8') if six.PY2 else response.headers['unicode_test']) self.assertEqual(for_openstack_comrades, 
value) class JSONResponseSerializerTest(test_utils.BaseTestCase): def test_to_json(self): fixture = {"key": "value"} expected = b'{"key": "value"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_date_format_value(self): fixture = {"date": datetime.datetime(1901, 3, 8, 2)} expected = b'{"date": "1901-03-08T02:00:00.000000"}' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_to_json_with_more_deep_format(self): fixture = {"is_public": True, "name": [{"name1": "test"}]} expected = {"is_public": True, "name": [{"name1": "test"}]} actual = wsgi.JSONResponseSerializer().to_json(fixture) actual = jsonutils.loads(actual) for k in expected: self.assertEqual(expected[k], actual[k]) def test_to_json_with_set(self): fixture = set(["foo"]) expected = b'["foo"]' actual = wsgi.JSONResponseSerializer().to_json(fixture) self.assertEqual(expected, actual) def test_default(self): fixture = {"key": "value"} response = webob.Response() wsgi.JSONResponseSerializer().default(response, fixture) self.assertEqual(200, response.status_int) content_types = [h for h in response.headerlist if h[0] == 'Content-Type'] self.assertEqual(1, len(content_types)) self.assertEqual('application/json', response.content_type) self.assertEqual(b'{"key": "value"}', response.body) class JSONRequestDeserializerTest(test_utils.BaseTestCase): def test_has_body_no_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers.pop('Content-Length') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_zero_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'asdf' request.headers['Content-Length'] = 0 self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_has_body_has_content_length(self): request = wsgi.Request.blank('/') request.method = 'POST' 
request.body = b'asdf' self.assertIn('Content-Length', request.headers) self.assertTrue(wsgi.JSONRequestDeserializer().has_body(request)) def test_no_body_no_content_length(self): request = wsgi.Request.blank('/') self.assertFalse(wsgi.JSONRequestDeserializer().has_body(request)) def test_from_json(self): fixture = '{"key": "value"}' expected = {"key": "value"} actual = wsgi.JSONRequestDeserializer().from_json(fixture) self.assertEqual(expected, actual) def test_from_json_malformed(self): fixture = 'kjasdklfjsklajf' self.assertRaises(webob.exc.HTTPBadRequest, wsgi.JSONRequestDeserializer().from_json, fixture) def test_default_no_body(self): request = wsgi.Request.blank('/') actual = wsgi.JSONRequestDeserializer().default(request) expected = {} self.assertEqual(expected, actual) def test_default_with_body(self): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'{"key": "value"}' actual = wsgi.JSONRequestDeserializer().default(request) expected = {"body": {"key": "value"}} self.assertEqual(expected, actual) def test_has_body_has_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked')) def test_has_body_multiple_transfer_encoding(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked, gzip')) def test_has_body_invalid_transfer_encoding(self): self.assertFalse(self._check_transfer_encoding( transfer_encoding='invalid', content_length=0)) def test_has_body_invalid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='invalid', content_length=5)) def test_has_body_valid_transfer_encoding_with_content_length(self): self.assertTrue(self._check_transfer_encoding( transfer_encoding='chunked', content_length=0)) def _check_transfer_encoding(self, transfer_encoding=None, content_length=None): request = wsgi.Request.blank('/') request.method = 'POST' request.body = b'fake_body' request.headers['transfer-encoding'] = 
transfer_encoding if content_length is not None: request.headers['content-length'] = content_length return wsgi.JSONRequestDeserializer().has_body(request) def test_get_bind_addr_default_value(self): expected = ('0.0.0.0', '123456') actual = wsgi.get_bind_addr(default_port="123456") self.assertEqual(expected, actual) class ServerTest(test_utils.BaseTestCase): def test_create_pool(self): """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool.""" actual = wsgi.Server(threads=1).create_pool() self.assertIsInstance(actual, eventlet.greenpool.GreenPool) @mock.patch.object(wsgi.Server, 'configure_socket') def test_http_keepalive(self, mock_configure_socket): self.config(http_keepalive=False) self.config(workers=0) server = wsgi.Server(threads=1) server.sock = 'fake_socket' # mocking eventlet.wsgi server method to check it is called with # configured 'http_keepalive' value. with mock.patch.object(eventlet.wsgi, 'server') as mock_server: fake_application = "fake-application" server.start(fake_application, 0) server.wait() mock_server.assert_called_once_with('fake_socket', fake_application, log=server._logger, debug=False, custom_pool=server.pool, keepalive=False, socket_timeout=900) def test_number_of_workers(self): """Ensure the default number of workers matches num cpus.""" def pid(): i = 1 while True: i = i + 1 yield i with mock.patch.object(os, 'fork') as mock_fork: mock_fork.side_effect = pid server = wsgi.Server() server.configure = mock.Mock() fake_application = "fake-application" server.start(fake_application, None) self.assertEqual(processutils.get_worker_count(), len(server.children)) class TestHelpers(test_utils.BaseTestCase): def test_headers_are_unicode(self): """ Verifies that the headers returned by conversion code are unicode. Headers are passed via http in non-testing mode, which automatically converts them to unicode. 
Verifying that the method does the conversion proves that we aren't passing data that works in tests but will fail in production. """ fixture = {'name': 'fake public image', 'is_public': True, 'size': 19, 'location': "file:///tmp/glance-tests/2", 'properties': {'distro': 'Ubuntu 10.04 LTS'}} headers = utils.image_meta_to_http_headers(fixture) for k, v in six.iteritems(headers): self.assertIsInstance(v, six.text_type) def test_data_passed_properly_through_headers(self): """ Verifies that data is the same after being passed through headers """ fixture = {'name': 'fake public image', 'is_public': True, 'deleted': False, 'name': None, 'size': 19, 'location': "file:///tmp/glance-tests/2", 'properties': {'distro': 'Ubuntu 10.04 LTS'}} headers = utils.image_meta_to_http_headers(fixture) class FakeResponse(object): pass response = FakeResponse() response.headers = headers result = utils.get_image_meta_from_headers(response) for k, v in six.iteritems(fixture): if v is not None: self.assertEqual(v, result[k]) else: self.assertNotIn(k, result) class GetSocketTestCase(test_utils.BaseTestCase): def setUp(self): super(GetSocketTestCase, self).setUp() self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.get_bind_addr", lambda x: ('192.168.0.13', 1234))) addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)), (2, 2, 17, '', ('192.168.0.13', 80)), (2, 3, 0, '', ('192.168.0.13', 80))] self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.socket.getaddrinfo", lambda *x: addr_info_list)) self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.time.time", mock.Mock(side_effect=[0, 1, 5, 10, 20, 35]))) self.useFixture(fixtures.MonkeyPatch( "glance.common.wsgi.utils.validate_key_cert", lambda *x: None)) wsgi.CONF.cert_file = '/etc/ssl/cert' wsgi.CONF.key_file = '/etc/ssl/key' wsgi.CONF.ca_file = '/etc/ssl/ca_cert' wsgi.CONF.tcp_keepidle = 600 def test_correct_configure_socket(self): mock_socket = mock.Mock() self.useFixture(fixtures.MonkeyPatch( 
'glance.common.wsgi.ssl.wrap_socket', mock_socket)) self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', lambda *x, **y: mock_socket)) server = wsgi.Server() server.default_port = 1234 server.configure_socket() self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1), mock_socket.mock_calls) self.assertIn(mock.call.setsockopt( socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), mock_socket.mock_calls) if hasattr(socket, 'TCP_KEEPIDLE'): self.assertIn(mock.call().setsockopt( socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, wsgi.CONF.tcp_keepidle), mock_socket.mock_calls) def test_get_socket_without_all_ssl_reqs(self): wsgi.CONF.key_file = None self.assertRaises(RuntimeError, wsgi.get_socket, 1234) def test_get_socket_with_bind_problems(self): self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', mock.Mock(side_effect=( [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None])))) self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.ssl.wrap_socket', lambda *x, **y: None)) self.assertRaises(RuntimeError, wsgi.get_socket, 1234) def test_get_socket_with_unexpected_socket_errno(self): self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.eventlet.listen', mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM)))) self.useFixture(fixtures.MonkeyPatch( 'glance.common.wsgi.ssl.wrap_socket', lambda *x, **y: None)) self.assertRaises(wsgi.socket.error, wsgi.get_socket, 1234) glance-12.0.0/glance/tests/unit/common/scripts/0000775000567000056710000000000012701407204022512 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/scripts/__init__.py0000664000567000056710000000000012701407047024616 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/scripts/image_import/0000775000567000056710000000000012701407204025166 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/scripts/image_import/__init__.py0000664000567000056710000000000012701407047027272 0ustar 
jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/common/scripts/image_import/test_main.py0000664000567000056710000001227212701407047027534 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from six.moves import urllib import glance.common.exception as exception from glance.common.scripts.image_import import main as image_import_script from glance.common.scripts import utils from glance.common import store_utils import glance.tests.utils as test_utils class TestImageImport(test_utils.BaseTestCase): def setUp(self): super(TestImageImport, self).setUp() def test_run(self): with mock.patch.object(image_import_script, '_execute') as mock_execute: task_id = mock.ANY context = mock.ANY task_repo = mock.ANY image_repo = mock.ANY image_factory = mock.ANY image_import_script.run(task_id, context, task_repo, image_repo, image_factory) mock_execute.assert_called_once_with(task_id, task_repo, image_repo, image_factory) def test_import_image(self): image_id = mock.ANY image = mock.Mock(image_id=image_id) image_repo = mock.Mock() image_repo.get.return_value = image image_factory = mock.ANY task_input = mock.Mock(image_properties=mock.ANY) uri = mock.ANY with mock.patch.object(image_import_script, 'create_image') as mock_create_image: with mock.patch.object(image_import_script, 'set_image_data') as mock_set_img_data: mock_create_image.return_value = image self.assertEqual( 
image_id, image_import_script.import_image(image_repo, image_factory, task_input, None, uri)) # Check image is in saving state before image_repo.save called self.assertEqual('saving', image.status) self.assertTrue(image_repo.save.called) mock_set_img_data.assert_called_once_with(image, uri, None) self.assertTrue(image_repo.get.called) self.assertTrue(image_repo.save.called) def test_create_image(self): image = mock.ANY image_repo = mock.Mock() image_factory = mock.Mock() image_factory.new_image.return_value = image # Note: include some base properties to ensure no error while # attempting to verify them image_properties = {'disk_format': 'foo', 'id': 'bar'} self.assertEqual(image, image_import_script.create_image(image_repo, image_factory, image_properties, None)) @mock.patch.object(utils, 'get_image_data_iter') def test_set_image_data_http(self, mock_image_iter): uri = 'http://www.example.com' image = mock.Mock() mock_image_iter.return_value = test_utils.FakeHTTPResponse() self.assertIsNone(image_import_script.set_image_data(image, uri, None)) def test_set_image_data_http_error(self): uri = 'blahhttp://www.example.com' image = mock.Mock() self.assertRaises(urllib.error.URLError, image_import_script.set_image_data, image, uri, None) @mock.patch.object(image_import_script, 'create_image') @mock.patch.object(image_import_script, 'set_image_data') @mock.patch.object(store_utils, 'delete_image_location_from_backend') def test_import_image_failed_with_expired_token( self, mock_delete_data, mock_set_img_data, mock_create_image): image_id = mock.ANY locations = ['location'] image = mock.Mock(image_id=image_id, locations=locations) image_repo = mock.Mock() image_repo.get.side_effect = [image, exception.NotAuthenticated] image_factory = mock.ANY task_input = mock.Mock(image_properties=mock.ANY) uri = mock.ANY mock_create_image.return_value = image self.assertRaises(exception.NotAuthenticated, image_import_script.import_image, image_repo, image_factory, task_input, None, 
uri) self.assertEqual(1, mock_set_img_data.call_count) mock_delete_data.assert_called_once_with( mock_create_image().context, image_id, 'location') glance-12.0.0/glance/tests/unit/common/scripts/test_scripts_utils.py0000664000567000056710000001257012701407047027044 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from six.moves import urllib from glance.common import exception from glance.common.scripts import utils as script_utils import glance.tests.utils as test_utils class TestScriptsUtils(test_utils.BaseTestCase): def setUp(self): super(TestScriptsUtils, self).setUp() def test_get_task(self): task = mock.ANY task_repo = mock.Mock(return_value=task) task_id = mock.ANY self.assertEqual(task, script_utils.get_task(task_repo, task_id)) def test_unpack_task_input(self): task_input = {"import_from": "foo", "import_from_format": "bar", "image_properties": "baz"} task = mock.Mock(task_input=task_input) self.assertEqual(task_input, script_utils.unpack_task_input(task)) def test_unpack_task_input_error(self): task_input1 = {"import_from_format": "bar", "image_properties": "baz"} task_input2 = {"import_from": "foo", "image_properties": "baz"} task_input3 = {"import_from": "foo", "import_from_format": "bar"} task1 = mock.Mock(task_input=task_input1) task2 = mock.Mock(task_input=task_input2) task3 = mock.Mock(task_input=task_input3) self.assertRaises(exception.Invalid, 
script_utils.unpack_task_input, task1) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task2) self.assertRaises(exception.Invalid, script_utils.unpack_task_input, task3) def test_set_base_image_properties(self): properties = {} script_utils.set_base_image_properties(properties) self.assertIn('disk_format', properties) self.assertIn('container_format', properties) self.assertEqual('qcow2', properties['disk_format']) self.assertEqual('bare', properties['container_format']) def test_set_base_image_properties_none(self): properties = None script_utils.set_base_image_properties(properties) self.assertIsNone(properties) def test_set_base_image_properties_not_empty(self): properties = {'disk_format': 'vmdk', 'container_format': 'bare'} script_utils.set_base_image_properties(properties) self.assertIn('disk_format', properties) self.assertIn('container_format', properties) self.assertEqual('vmdk', properties.get('disk_format')) self.assertEqual('bare', properties.get('container_format')) def test_validate_location_http(self): location = 'http://example.com' self.assertEqual(location, script_utils.validate_location_uri(location)) def test_validate_location_https(self): location = 'https://example.com' self.assertEqual(location, script_utils.validate_location_uri(location)) def test_validate_location_none_error(self): self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, '') def test_validate_location_file_location_error(self): self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, "file:///tmp") self.assertRaises(exception.BadStoreUri, script_utils.validate_location_uri, "filesystem:///tmp") def test_validate_location_unsupported_error(self): location = 'swift' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+http' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+https' 
self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'swift+config' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'vsphere' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'sheepdog://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 's3+https://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'rbd://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) location = 'cinder://' self.assertRaises(urllib.error.URLError, script_utils.validate_location_uri, location) glance-12.0.0/glance/tests/unit/common/test_client.py0000664000567000056710000000571212701407047023724 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from mox3 import mox from six.moves import http_client import testtools from glance.common import auth from glance.common import client from glance.tests import utils class TestClient(testtools.TestCase): def setUp(self): super(TestClient, self).setUp() self.mock = mox.Mox() self.mock.StubOutWithMock(http_client.HTTPConnection, 'request') self.mock.StubOutWithMock(http_client.HTTPConnection, 'getresponse') self.endpoint = 'example.com' self.client = client.BaseClient(self.endpoint, port=9191, auth_token=u'abc123') def tearDown(self): super(TestClient, self).tearDown() self.mock.UnsetStubs() def test_make_auth_plugin(self): creds = {'strategy': 'keystone'} insecure = False configure_via_auth = True self.mock.StubOutWithMock(auth, 'get_plugin_from_strategy') auth.get_plugin_from_strategy('keystone', creds, insecure, configure_via_auth) self.mock.ReplayAll() self.client.make_auth_plugin(creds, insecure) self.mock.VerifyAll() def test_http_encoding_headers(self): http_client.HTTPConnection.request( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) # Lets fake the response # returned by http_client fake = utils.FakeHTTPResponse(data=b"Ok") http_client.HTTPConnection.getresponse().AndReturn(fake) self.mock.ReplayAll() headers = {"test": u'ni\xf1o'} resp = self.client.do_request('GET', '/v1/images/detail', headers=headers) self.assertEqual(fake, resp) def test_http_encoding_params(self): http_client.HTTPConnection.request( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) # Lets fake the response # returned by http_client fake = utils.FakeHTTPResponse(data=b"Ok") http_client.HTTPConnection.getresponse().AndReturn(fake) self.mock.ReplayAll() params = {"test": u'ni\xf1o'} resp = self.client.do_request('GET', '/v1/images/detail', params=params) self.assertEqual(fake, resp) glance-12.0.0/glance/tests/unit/common/test_semver.py0000664000567000056710000000641612701407047023751 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, 
Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common import exception from glance.common import semver_db from glance.tests import utils as test_utils class SemVerTestCase(test_utils.BaseTestCase): def test_long_conversion(self): initial = '1.2.3-beta+07.17.2014' v = semver_db.parse(initial) l, prerelease, build = v.__composite_values__() v2 = semver_db.DBVersion(l, prerelease, build) self.assertEqual(initial, str(v2)) def test_major_comparison_as_long(self): v1 = semver_db.parse("1.1.100") v2 = semver_db.parse("2.0.0") self.assertTrue(v2.__composite_values__()[0] > v1.__composite_values__()[0]) def test_minor_comparison_as_long(self): v1 = semver_db.parse("1.1.100") v2 = semver_db.parse("2.0.0") self.assertTrue(v2.__composite_values__()[0] > v1.__composite_values__()[0]) def test_patch_comparison_as_long(self): v1 = semver_db.parse("1.1.1") v2 = semver_db.parse("1.1.100") self.assertTrue(v2.__composite_values__()[0] > v1.__composite_values__()[0]) def test_label_comparison_as_long(self): v1 = semver_db.parse("1.1.1-alpha") v2 = semver_db.parse("1.1.1") self.assertTrue(v2.__composite_values__()[0] > v1.__composite_values__()[0]) def test_label_comparison_as_string(self): versions = [ semver_db.parse("1.1.1-0.10.a.23.y.255").__composite_values__()[1], semver_db.parse("1.1.1-0.10.z.23.x.255").__composite_values__()[1], semver_db.parse("1.1.1-0.10.z.23.y.255").__composite_values__()[1], semver_db.parse("1.1.1-0.10.z.23.y.256").__composite_values__()[1], 
semver_db.parse("1.1.1-0.10.z.24.y.255").__composite_values__()[1], semver_db.parse("1.1.1-0.11.z.24.y.255").__composite_values__()[1], semver_db.parse("1.1.1-1.11.z.24.y.255").__composite_values__()[1], semver_db.parse("1.1.1-alp.1.2.3.4.5.6").__composite_values__()[1]] for i in range(len(versions) - 1): self.assertLess(versions[i], versions[i + 1]) def test_too_large_version(self): version1 = '1.1.65536' version2 = '1.65536.1' version3 = '65536.1.1' self.assertRaises(exception.InvalidVersion, semver_db.parse, version1) self.assertRaises(exception.InvalidVersion, semver_db.parse, version2) self.assertRaises(exception.InvalidVersion, semver_db.parse, version3) def test_too_long_numeric_segments(self): version = semver_db.parse('1.0.0-alpha.1234567') self.assertRaises(exception.InvalidVersion, version.__composite_values__) glance-12.0.0/glance/tests/unit/common/test_location_strategy.py0000664000567000056710000002124412701407047026176 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import stevedore from glance.common import location_strategy from glance.common.location_strategy import location_order from glance.common.location_strategy import store_type from glance.tests.unit import base class TestLocationStrategy(base.IsolatedUnitTest): """Test routines in glance.common.location_strategy""" def _set_original_strategies(self, original_strategies): for name in location_strategy._available_strategies.keys(): if name not in original_strategies: del location_strategy._available_strategies[name] def setUp(self): super(TestLocationStrategy, self).setUp() original_strategies = ['location_order', 'store_type'] self.addCleanup(self._set_original_strategies, original_strategies) def test_load_strategy_modules(self): modules = location_strategy._load_strategies() # By default we have two built-in strategy modules. self.assertEqual(2, len(modules)) self.assertEqual(set(['location_order', 'store_type']), set(modules.keys())) self.assertEqual(location_strategy._available_strategies, modules) def test_load_strategy_module_with_deduplicating(self): modules = ['module1', 'module2'] def _fake_stevedore_extension_manager(*args, **kwargs): ret = lambda: None ret.names = lambda: modules return ret def _fake_stevedore_driver_manager(*args, **kwargs): ret = lambda: None ret.driver = lambda: None ret.driver.__name__ = kwargs['name'] # Module 1 and 2 has a same strategy name ret.driver.get_strategy_name = lambda: 'module_name' ret.driver.init = lambda: None return ret self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager", _fake_stevedore_extension_manager) self.stub = self.stubs.Set(stevedore.driver, "DriverManager", _fake_stevedore_driver_manager) loaded_modules = location_strategy._load_strategies() self.assertEqual(1, len(loaded_modules)) self.assertIn('module_name', loaded_modules) # Skipped module #2, duplicated one. 
self.assertEqual('module1', loaded_modules['module_name'].__name__) def test_load_strategy_module_with_init_exception(self): modules = ['module_init_exception', 'module_good'] def _fake_stevedore_extension_manager(*args, **kwargs): ret = lambda: None ret.names = lambda: modules return ret def _fake_stevedore_driver_manager(*args, **kwargs): if kwargs['name'] == 'module_init_exception': raise Exception('strategy module failed to initialize.') else: ret = lambda: None ret.driver = lambda: None ret.driver.__name__ = kwargs['name'] ret.driver.get_strategy_name = lambda: kwargs['name'] ret.driver.init = lambda: None return ret self.stub = self.stubs.Set(stevedore.extension, "ExtensionManager", _fake_stevedore_extension_manager) self.stub = self.stubs.Set(stevedore.driver, "DriverManager", _fake_stevedore_driver_manager) loaded_modules = location_strategy._load_strategies() self.assertEqual(1, len(loaded_modules)) self.assertIn('module_good', loaded_modules) # Skipped module #1, initialize failed one. 
self.assertEqual('module_good', loaded_modules['module_good'].__name__) def test_verify_valid_location_strategy(self): for strategy_name in ['location_order', 'store_type']: self.config(location_strategy=strategy_name) location_strategy.verify_location_strategy() def test_verify_invalid_location_strategy(self): strategy = 'invalid_strategy' self.config(location_strategy=strategy) self.assertRaises(RuntimeError, location_strategy.verify_location_strategy, strategy) def test_get_ordered_locations_with_none_or_empty_locations(self): self.assertEqual([], location_strategy.get_ordered_locations(None)) self.assertEqual([], location_strategy.get_ordered_locations([])) def test_get_ordered_locations(self): self.config(location_strategy='location_order') original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] ordered_locs = location_strategy.get_ordered_locations(original_locs) # Original location list should remain unchanged self.assertNotEqual(id(original_locs), id(ordered_locs)) self.assertEqual(original_locs, ordered_locs) def test_choose_best_location_with_none_or_empty_locations(self): self.assertIsNone(location_strategy.choose_best_location(None)) self.assertIsNone(location_strategy.choose_best_location([])) def test_choose_best_location(self): self.config(location_strategy='location_order') original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] best_loc = location_strategy.choose_best_location(original_locs) # Deep copy protect original location. self.assertNotEqual(id(original_locs), id(best_loc)) self.assertEqual(original_locs[0], best_loc) class TestLocationOrderStrategyModule(base.IsolatedUnitTest): """Test routines in glance.common.location_strategy.location_order""" def test_get_ordered_locations(self): original_locs = [{'url': 'loc1'}, {'url': 'loc2'}] ordered_locs = location_order.get_ordered_locations(original_locs) # The result will ordered by original natural order. 
self.assertEqual(original_locs, ordered_locs) class TestStoreTypeStrategyModule(base.IsolatedUnitTest): """Test routines in glance.common.location_strategy.store_type""" def test_get_ordered_locations(self): self.config(store_type_preference=[' rbd', 'sheepdog ', ' filesystem', 'swift ', ' http ', 's3'], group='store_type_location_strategy') locs = [{'url': 'file://image0', 'metadata': {'idx': 3}}, {'url': 'rbd://image1', 'metadata': {'idx': 0}}, {'url': 's3://image2', 'metadata': {'idx': 7}}, {'url': 'file://image3', 'metadata': {'idx': 4}}, {'url': 'swift://image4', 'metadata': {'idx': 6}}, {'url': 'cinder://image5', 'metadata': {'idx': 8}}, {'url': 'file://image6', 'metadata': {'idx': 5}}, {'url': 'rbd://image7', 'metadata': {'idx': 1}}, {'url': 'sheepdog://image8', 'metadata': {'idx': 2}}] ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs)) locs.sort(key=lambda loc: loc['metadata']['idx']) # The result will ordered by preferred store type order. self.assertEqual(locs, ordered_locs) def test_get_ordered_locations_with_invalid_store_name(self): self.config(store_type_preference=[' rbd', 'sheepdog ', 'invalid', 'swift ', ' http ', 's3'], group='store_type_location_strategy') locs = [{'url': 'file://image0', 'metadata': {'idx': 5}}, {'url': 'rbd://image1', 'metadata': {'idx': 0}}, {'url': 's3://image2', 'metadata': {'idx': 4}}, {'url': 'file://image3', 'metadata': {'idx': 6}}, {'url': 'swift://image4', 'metadata': {'idx': 3}}, {'url': 'cinder://image5', 'metadata': {'idx': 7}}, {'url': 'file://image6', 'metadata': {'idx': 8}}, {'url': 'rbd://image7', 'metadata': {'idx': 1}}, {'url': 'sheepdog://image8', 'metadata': {'idx': 2}}] ordered_locs = store_type.get_ordered_locations(copy.deepcopy(locs)) locs.sort(key=lambda loc: loc['metadata']['idx']) # The result will ordered by preferred store type order. 
self.assertEqual(locs, ordered_locs) glance-12.0.0/glance/tests/unit/common/test_scripts.py0000664000567000056710000000264212701407047024134 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import glance.common.scripts as scripts from glance.common.scripts.image_import import main as image_import import glance.tests.utils as test_utils class TestScripts(test_utils.BaseTestCase): def setUp(self): super(TestScripts, self).setUp() def test_run_task(self): task_id = mock.ANY task_type = 'import' context = mock.ANY task_repo = mock.ANY image_repo = mock.ANY image_factory = mock.ANY with mock.patch.object(image_import, 'run') as mock_run: scripts.run_task(task_id, task_type, context, task_repo, image_repo, image_factory) mock_run.assert_called_once_with(task_id, context, task_repo, image_repo, image_factory) glance-12.0.0/glance/tests/unit/common/test_config.py0000664000567000056710000001006412701407047023707 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import shutil import fixtures import oslo_middleware from oslotest import moxstubout from glance.api.middleware import context from glance.common import config from glance.tests import utils as test_utils class TestPasteApp(test_utils.BaseTestCase): def setUp(self): super(TestPasteApp, self).setUp() mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.stubs = mox_fixture.stubs def _do_test_load_paste_app(self, expected_app_type, make_paste_file=True, paste_flavor=None, paste_config_file=None, paste_append=None): def _writeto(path, str): with open(path, 'w') as f: f.write(str or '') f.flush() def _appendto(orig, copy, str): shutil.copy(orig, copy) with open(copy, 'a') as f: f.write(str or '') f.flush() self.config(flavor=paste_flavor, config_file=paste_config_file, group='paste_deploy') temp_dir = self.useFixture(fixtures.TempDir()).path temp_file = os.path.join(temp_dir, 'testcfg.conf') _writeto(temp_file, '[DEFAULT]\n') config.parse_args(['--config-file', temp_file]) paste_to = temp_file.replace('.conf', '-paste.ini') if not paste_config_file and make_paste_file: paste_from = os.path.join(os.getcwd(), 'etc/glance-registry-paste.ini') _appendto(paste_from, paste_to, paste_append) app = config.load_paste_app('glance-registry') self.assertIsInstance(app, expected_app_type) def test_load_paste_app(self): expected_middleware = oslo_middleware.Healthcheck self._do_test_load_paste_app(expected_middleware) def test_load_paste_app_paste_config_not_found(self): expected_middleware = context.UnauthenticatedContextMiddleware 
self.assertRaises(RuntimeError, self._do_test_load_paste_app, expected_middleware, make_paste_file=False) def test_load_paste_app_with_paste_flavor(self): pipeline = ('[pipeline:glance-registry-incomplete]\n' 'pipeline = context registryapp') expected_middleware = context.ContextMiddleware self._do_test_load_paste_app(expected_middleware, paste_flavor='incomplete', paste_append=pipeline) def test_load_paste_app_with_paste_config_file(self): paste_config_file = os.path.join(os.getcwd(), 'etc/glance-registry-paste.ini') expected_middleware = oslo_middleware.Healthcheck self._do_test_load_paste_app(expected_middleware, paste_config_file=paste_config_file) def test_get_path_non_exist(self): self.assertRaises(RuntimeError, config._get_deployment_config_file) class TestDefaultConfig(test_utils.BaseTestCase): def setUp(self): super(TestDefaultConfig, self).setUp() self.CONF = config.cfg.CONF self.CONF.import_group('profiler', 'glance.common.wsgi') def test_osprofiler_disabled(self): self.assertFalse(self.CONF.profiler.enabled) self.assertFalse(self.CONF.profiler.trace_sqlalchemy) glance-12.0.0/glance/tests/unit/common/test_swift_store_utils.py0000664000567000056710000000725312701407047026240 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from glance.common import exception from glance.common import swift_store_utils from glance.tests.unit import base class TestSwiftParams(base.IsolatedUnitTest): def setUp(self): super(TestSwiftParams, self).setUp() conf_file = "glance-swift.conf" test_dir = self.useFixture(fixtures.TempDir()).path self.swift_config_file = self._copy_data_file(conf_file, test_dir) self.config(swift_store_config_file=self.swift_config_file) def tearDown(self): super(TestSwiftParams, self).tearDown() def test_multiple_swift_account_enabled(self): self.config(swift_store_config_file="glance-swift.conf") self.assertTrue( swift_store_utils.is_multiple_swift_store_accounts_enabled()) def test_multiple_swift_account_disabled(self): self.config(swift_store_config_file=None) self.assertFalse( swift_store_utils.is_multiple_swift_store_accounts_enabled()) def test_swift_config_file_doesnt_exist(self): self.config(swift_store_config_file='fake-file.conf') self.assertRaises(exception.InvalidSwiftStoreConfiguration, swift_store_utils.SwiftParams) def test_swift_config_uses_default_values_multiple_account_disabled(self): default_user = 'user_default' default_key = 'key_default' default_auth_address = 'auth@default.com' default_account_reference = 'ref_default' confs = {'swift_store_config_file': None, 'swift_store_user': default_user, 'swift_store_key': default_key, 'swift_store_auth_address': default_auth_address, 'default_swift_reference': default_account_reference} self.config(**confs) swift_params = swift_store_utils.SwiftParams().params self.assertEqual(1, len(swift_params.keys())) self.assertEqual(default_user, swift_params[default_account_reference]['user'] ) self.assertEqual(default_key, swift_params[default_account_reference]['key'] ) self.assertEqual(default_auth_address, swift_params[default_account_reference] ['auth_address'] ) def test_swift_store_config_validates_for_creds_auth_address(self): swift_params = swift_store_utils.SwiftParams().params 
self.assertEqual('tenant:user1', swift_params['ref1']['user'] ) self.assertEqual('key1', swift_params['ref1']['key'] ) self.assertEqual('example.com', swift_params['ref1']['auth_address']) self.assertEqual('user2', swift_params['ref2']['user']) self.assertEqual('key2', swift_params['ref2']['key']) self.assertEqual('http://example.com', swift_params['ref2']['auth_address'] ) glance-12.0.0/glance/tests/unit/common/test_exception.py0000664000567000056710000000371012701407047024440 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import encodeutils import six from glance.common import exception from glance.tests import utils as test_utils class GlanceExceptionTestCase(test_utils.BaseTestCase): def test_default_error_msg(self): class FakeGlanceException(exception.GlanceException): message = "default message" exc = FakeGlanceException() self.assertEqual('default message', encodeutils.exception_to_unicode(exc)) def test_specified_error_msg(self): msg = exception.GlanceException('test') self.assertIn('test', encodeutils.exception_to_unicode(msg)) def test_default_error_msg_with_kwargs(self): class FakeGlanceException(exception.GlanceException): message = "default message: %(code)s" exc = FakeGlanceException(code=500) self.assertEqual("default message: 500", encodeutils.exception_to_unicode(exc)) def test_specified_error_msg_with_kwargs(self): msg = exception.GlanceException('test: %(code)s', code=500) self.assertIn('test: 500', encodeutils.exception_to_unicode(msg)) def test_non_unicode_error_msg(self): exc = exception.GlanceException(str('test')) self.assertIsInstance(encodeutils.exception_to_unicode(exc), six.text_type) glance-12.0.0/glance/tests/unit/test_db_metadef.py0000664000567000056710000005560512701407047023236 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # Copyright 2014 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import encodeutils from glance.common import exception import glance.context import glance.db import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' NAMESPACE1 = 'namespace1' NAMESPACE2 = 'namespace2' NAMESPACE3 = 'namespace3' NAMESPACE4 = 'namespace4' PROPERTY1 = 'Property1' PROPERTY2 = 'Property2' PROPERTY3 = 'Property3' OBJECT1 = 'Object1' OBJECT2 = 'Object2' OBJECT3 = 'Object3' TAG1 = 'Tag1' TAG2 = 'Tag2' TAG3 = 'Tag3' TAG4 = 'Tag4' TAG5 = 'Tag5' RESOURCE_TYPE1 = 'ResourceType1' RESOURCE_TYPE2 = 'ResourceType2' RESOURCE_TYPE3 = 'ResourceType3' def _db_namespace_fixture(**kwargs): namespace = { 'namespace': None, 'display_name': None, 'description': None, 'visibility': True, 'protected': False, 'owner': None } namespace.update(kwargs) return namespace def _db_property_fixture(name, **kwargs): property = { 'name': name, 'json_schema': {"type": "string", "title": "title"}, } property.update(kwargs) return property def _db_object_fixture(name, **kwargs): obj = { 'name': name, 'description': None, 'json_schema': {}, 'required': '[]', } obj.update(kwargs) return obj def _db_tag_fixture(name, **kwargs): obj = { 'name': name } obj.update(kwargs) return obj def _db_tags_fixture(names=None): tags = [] if names: tag_name_list = names else: tag_name_list = [TAG1, TAG2, TAG3] for tag_name in tag_name_list: tags.append(_db_tag_fixture(tag_name)) return tags def _db_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'protected': False, } obj.update(kwargs) return obj def _db_namespace_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'properties_target': None, 'prefix': None, } obj.update(kwargs) return obj class TestMetadefRepo(test_utils.BaseTestCase): def 
setUp(self): super(TestMetadefRepo, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.context = glance.context.RequestContext(user=USER1, tenant=TENANT1) self.namespace_repo = glance.db.MetadefNamespaceRepo(self.context, self.db) self.property_repo = glance.db.MetadefPropertyRepo(self.context, self.db) self.object_repo = glance.db.MetadefObjectRepo(self.context, self.db) self.tag_repo = glance.db.MetadefTagRepo(self.context, self.db) self.resource_type_repo = glance.db.MetadefResourceTypeRepo( self.context, self.db) self.namespace_factory = glance.domain.MetadefNamespaceFactory() self.property_factory = glance.domain.MetadefPropertyFactory() self.object_factory = glance.domain.MetadefObjectFactory() self.tag_factory = glance.domain.MetadefTagFactory() self.resource_type_factory = glance.domain.MetadefResourceTypeFactory() self._create_namespaces() self._create_properties() self._create_objects() self._create_tags() self._create_resource_types() def _create_namespaces(self): self.namespaces = [ _db_namespace_fixture(namespace=NAMESPACE1, display_name='1', description='desc1', visibility='private', protected=True, owner=TENANT1), _db_namespace_fixture(namespace=NAMESPACE2, display_name='2', description='desc2', visibility='public', protected=False, owner=TENANT1), _db_namespace_fixture(namespace=NAMESPACE3, display_name='3', description='desc3', visibility='private', protected=True, owner=TENANT3), _db_namespace_fixture(namespace=NAMESPACE4, display_name='4', description='desc4', visibility='public', protected=True, owner=TENANT3) ] [self.db.metadef_namespace_create(None, namespace) for namespace in self.namespaces] def _create_properties(self): self.properties = [ _db_property_fixture(name=PROPERTY1), _db_property_fixture(name=PROPERTY2), _db_property_fixture(name=PROPERTY3) ] [self.db.metadef_property_create(self.context, NAMESPACE1, property) for property in self.properties] [self.db.metadef_property_create(self.context, NAMESPACE4, property) 
for property in self.properties] def _create_objects(self): self.objects = [ _db_object_fixture(name=OBJECT1, description='desc1'), _db_object_fixture(name=OBJECT2, description='desc2'), _db_object_fixture(name=OBJECT3, description='desc3'), ] [self.db.metadef_object_create(self.context, NAMESPACE1, object) for object in self.objects] [self.db.metadef_object_create(self.context, NAMESPACE4, object) for object in self.objects] def _create_tags(self): self.tags = [ _db_tag_fixture(name=TAG1), _db_tag_fixture(name=TAG2), _db_tag_fixture(name=TAG3), ] [self.db.metadef_tag_create(self.context, NAMESPACE1, tag) for tag in self.tags] [self.db.metadef_tag_create(self.context, NAMESPACE4, tag) for tag in self.tags] def _create_resource_types(self): self.resource_types = [ _db_resource_type_fixture(name=RESOURCE_TYPE1, protected=False), _db_resource_type_fixture(name=RESOURCE_TYPE2, protected=False), _db_resource_type_fixture(name=RESOURCE_TYPE3, protected=True), ] [self.db.metadef_resource_type_create(self.context, resource_type) for resource_type in self.resource_types] def test_get_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(NAMESPACE1, namespace.namespace) self.assertEqual('desc1', namespace.description) self.assertEqual('1', namespace.display_name) self.assertEqual(TENANT1, namespace.owner) self.assertTrue(namespace.protected) self.assertEqual('private', namespace.visibility) def test_get_namespace_not_found(self): fake_namespace = "fake_namespace" exc = self.assertRaises(exception.NotFound, self.namespace_repo.get, fake_namespace) self.assertIn(fake_namespace, encodeutils.exception_to_unicode(exc)) def test_get_namespace_forbidden(self): self.assertRaises(exception.NotFound, self.namespace_repo.get, NAMESPACE3) def test_list_namespace(self): namespaces = self.namespace_repo.list() namespace_names = set([n.namespace for n in namespaces]) self.assertEqual(set([NAMESPACE1, NAMESPACE2, NAMESPACE4]), namespace_names) def 
test_list_private_namespaces(self): filters = {'visibility': 'private'} namespaces = self.namespace_repo.list(filters=filters) namespace_names = set([n.namespace for n in namespaces]) self.assertEqual(set([NAMESPACE1]), namespace_names) def test_add_namespace(self): # NOTE(pawel-koniszewski): Change db_namespace_fixture to # namespace_factory when namespace primary key in DB # will be changed from Integer to UUID namespace = _db_namespace_fixture(namespace='added_namespace', display_name='fake', description='fake_desc', visibility='public', protected=True, owner=TENANT1) self.assertEqual('added_namespace', namespace['namespace']) self.db.metadef_namespace_create(None, namespace) retrieved_namespace = self.namespace_repo.get(namespace['namespace']) self.assertEqual('added_namespace', retrieved_namespace.namespace) def test_save_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) namespace.display_name = 'save_name' namespace.description = 'save_desc' self.namespace_repo.save(namespace) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual('save_name', namespace.display_name) self.assertEqual('save_desc', namespace.description) def test_remove_namespace(self): namespace = self.namespace_repo.get(NAMESPACE1) self.namespace_repo.remove(namespace) self.assertRaises(exception.NotFound, self.namespace_repo.get, NAMESPACE1) def test_remove_namespace_not_found(self): fake_name = 'fake_name' namespace = self.namespace_repo.get(NAMESPACE1) namespace.namespace = fake_name exc = self.assertRaises(exception.NotFound, self.namespace_repo.remove, namespace) self.assertIn(fake_name, encodeutils.exception_to_unicode(exc)) def test_get_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(PROPERTY1, property.name) self.assertEqual(namespace.namespace, property.namespace.namespace) def test_get_property_not_found(self): exc = self.assertRaises(exception.NotFound, 
self.property_repo.get, NAMESPACE2, PROPERTY1) self.assertIn(PROPERTY1, encodeutils.exception_to_unicode(exc)) def test_list_property(self): properties = self.property_repo.list(filters={'namespace': NAMESPACE1}) property_names = set([p.name for p in properties]) self.assertEqual(set([PROPERTY1, PROPERTY2, PROPERTY3]), property_names) def test_list_property_empty_result(self): properties = self.property_repo.list(filters={'namespace': NAMESPACE2}) property_names = set([p.name for p in properties]) self.assertEqual(set([]), property_names) def test_list_property_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.property_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) def test_add_property(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) self.db.metadef_property_create(self.context, NAMESPACE1, property) retrieved_property = self.property_repo.get(NAMESPACE1, 'added_property') self.assertEqual('added_property', retrieved_property.name) def test_add_property_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) self.assertRaises(exception.Forbidden, self.db.metadef_property_create, self.context, NAMESPACE3, property) def test_add_property_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_property_fixture to # property_factory when property primary key in DB # will be changed from Integer to UUID property = _db_property_fixture(name='added_property') self.assertEqual('added_property', property['name']) 
self.assertRaises(exception.NotFound, self.db.metadef_property_create, self.context, 'not_a_namespace', property) def test_save_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) property.schema = '{"save": "schema"}' self.property_repo.save(property) property = self.property_repo.get(NAMESPACE1, PROPERTY1) self.assertEqual(PROPERTY1, property.name) self.assertEqual('{"save": "schema"}', property.schema) def test_remove_property(self): property = self.property_repo.get(NAMESPACE1, PROPERTY1) self.property_repo.remove(property) self.assertRaises(exception.NotFound, self.property_repo.get, NAMESPACE1, PROPERTY1) def test_remove_property_not_found(self): fake_name = 'fake_name' property = self.property_repo.get(NAMESPACE1, PROPERTY1) property.name = fake_name self.assertRaises(exception.NotFound, self.property_repo.remove, property) def test_get_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(OBJECT1, object.name) self.assertEqual('desc1', object.description) self.assertEqual(['[]'], object.required) self.assertEqual({}, object.properties) self.assertEqual(namespace.namespace, object.namespace.namespace) def test_get_object_not_found(self): exc = self.assertRaises(exception.NotFound, self.object_repo.get, NAMESPACE2, OBJECT1) self.assertIn(OBJECT1, encodeutils.exception_to_unicode(exc)) def test_list_object(self): objects = self.object_repo.list(filters={'namespace': NAMESPACE1}) object_names = set([o.name for o in objects]) self.assertEqual(set([OBJECT1, OBJECT2, OBJECT3]), object_names) def test_list_object_empty_result(self): objects = self.object_repo.list(filters={'namespace': NAMESPACE2}) object_names = set([o.name for o in objects]) self.assertEqual(set([]), object_names) def test_list_object_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.object_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', 
encodeutils.exception_to_unicode(exc)) def test_add_object(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) self.db.metadef_object_create(self.context, NAMESPACE1, object) retrieved_object = self.object_repo.get(NAMESPACE1, 'added_object') self.assertEqual('added_object', retrieved_object.name) def test_add_object_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) self.assertRaises(exception.Forbidden, self.db.metadef_object_create, self.context, NAMESPACE3, object) def test_add_object_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_object_fixture to # object_factory when object primary key in DB # will be changed from Integer to UUID object = _db_object_fixture(name='added_object') self.assertEqual('added_object', object['name']) self.assertRaises(exception.NotFound, self.db.metadef_object_create, self.context, 'not-a-namespace', object) def test_save_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) object.required = ['save_req'] object.description = 'save_desc' self.object_repo.save(object) object = self.object_repo.get(NAMESPACE1, OBJECT1) self.assertEqual(OBJECT1, object.name) self.assertEqual(['save_req'], object.required) self.assertEqual('save_desc', object.description) def test_remove_object(self): object = self.object_repo.get(NAMESPACE1, OBJECT1) self.object_repo.remove(object) self.assertRaises(exception.NotFound, self.object_repo.get, NAMESPACE1, OBJECT1) def test_remove_object_not_found(self): fake_name = 'fake_name' object = self.object_repo.get(NAMESPACE1, OBJECT1) object.name = fake_name 
self.assertRaises(exception.NotFound, self.object_repo.remove, object) def test_list_resource_type(self): resource_type = self.resource_type_repo.list( filters={'namespace': NAMESPACE1}) self.assertEqual(0, len(resource_type)) def test_get_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) namespace = self.namespace_repo.get(NAMESPACE1) self.assertEqual(TAG1, tag.name) self.assertEqual(namespace.namespace, tag.namespace.namespace) def test_get_tag_not_found(self): exc = self.assertRaises(exception.NotFound, self.tag_repo.get, NAMESPACE2, TAG1) self.assertIn(TAG1, encodeutils.exception_to_unicode(exc)) def test_list_tag(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) def test_list_tag_empty_result(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE2}) tag_names = set([t.name for t in tags]) self.assertEqual(set([]), tag_names) def test_list_tag_namespace_not_found(self): exc = self.assertRaises(exception.NotFound, self.tag_repo.list, filters={'namespace': 'not-a-namespace'}) self.assertIn('not-a-namespace', encodeutils.exception_to_unicode(exc)) def test_add_tag(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.db.metadef_tag_create(self.context, NAMESPACE1, tag) retrieved_tag = self.tag_repo.get(NAMESPACE1, 'added_tag') self.assertEqual('added_tag', retrieved_tag.name) def test_add_tags(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) tags = _db_tags_fixture([TAG3, TAG4, TAG5]) self.db.metadef_tag_create_tags(self.context, NAMESPACE1, tags) tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) 
self.assertEqual(set([TAG3, TAG4, TAG5]), tag_names) def test_add_duplicate_tags_with_pre_existing_tags(self): tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) tags = _db_tags_fixture([TAG5, TAG4, TAG5]) self.assertRaises(exception.Duplicate, self.db.metadef_tag_create_tags, self.context, NAMESPACE1, tags) tags = self.tag_repo.list(filters={'namespace': NAMESPACE1}) tag_names = set([t.name for t in tags]) self.assertEqual(set([TAG1, TAG2, TAG3]), tag_names) def test_add_tag_namespace_forbidden(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.assertRaises(exception.Forbidden, self.db.metadef_tag_create, self.context, NAMESPACE3, tag) def test_add_tag_namespace_not_found(self): # NOTE(pawel-koniszewski): Change db_tag_fixture to # tag_factory when tag primary key in DB # will be changed from Integer to UUID tag = _db_tag_fixture(name='added_tag') self.assertEqual('added_tag', tag['name']) self.assertRaises(exception.NotFound, self.db.metadef_tag_create, self.context, 'not-a-namespace', tag) def test_save_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) self.tag_repo.save(tag) tag = self.tag_repo.get(NAMESPACE1, TAG1) self.assertEqual(TAG1, tag.name) def test_remove_tag(self): tag = self.tag_repo.get(NAMESPACE1, TAG1) self.tag_repo.remove(tag) self.assertRaises(exception.NotFound, self.tag_repo.get, NAMESPACE1, TAG1) def test_remove_tag_not_found(self): fake_name = 'fake_name' tag = self.tag_repo.get(NAMESPACE1, TAG1) tag.name = fake_name self.assertRaises(exception.NotFound, self.tag_repo.remove, tag) glance-12.0.0/glance/tests/unit/test_versions.py0000664000567000056710000001517712701407051023027 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import webob from glance.api.middleware import version_negotiation from glance.api import versions from glance.tests.unit import base class VersionsTest(base.IsolatedUnitTest): """Test the version information returned from the API service.""" def test_get_version_list(self): req = webob.Request.blank('/', base_url='http://127.0.0.1:9292/') req.accept = 'application/json' self.config(bind_host='127.0.0.1', bind_port=9292) res = versions.Controller().index(req) self.assertEqual(300, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = [ { 'id': 'v2.3', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v2/'}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v2/'}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v2/'}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v2/'}], }, { 'id': 'v1.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v1/'}], }, { 'id': 'v1.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'http://127.0.0.1:9292/v1/'}], }, ] self.assertEqual(expected, results) def test_get_version_list_public_endpoint(self): req = webob.Request.blank('/', 
base_url='http://127.0.0.1:9292/') req.accept = 'application/json' self.config(bind_host='127.0.0.1', bind_port=9292, public_endpoint='https://example.com:9292') res = versions.Controller().index(req) self.assertEqual(300, res.status_int) self.assertEqual('application/json', res.content_type) results = jsonutils.loads(res.body)['versions'] expected = [ { 'id': 'v2.3', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v2/'}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v2/'}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v2/'}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v2/'}], }, { 'id': 'v1.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v1/'}], }, { 'id': 'v1.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': 'https://example.com:9292/v1/'}], }, ] self.assertEqual(expected, results) class VersionNegotiationTest(base.IsolatedUnitTest): def setUp(self): super(VersionNegotiationTest, self).setUp() self.middleware = version_negotiation.VersionNegotiationFilter(None) def test_request_url_v1(self): request = webob.Request.blank('/v1/images') self.middleware.process_request(request) self.assertEqual('/v1/images', request.path_info) def test_request_url_v1_0(self): request = webob.Request.blank('/v1.0/images') self.middleware.process_request(request) self.assertEqual('/v1/images', request.path_info) def test_request_url_v1_1(self): request = webob.Request.blank('/v1.1/images') self.middleware.process_request(request) self.assertEqual('/v1/images', request.path_info) def test_request_accept_v1(self): request = webob.Request.blank('/images') request.headers = {'accept': 'application/vnd.openstack.images-v1'} self.middleware.process_request(request) self.assertEqual('/v1/images', request.path_info) def 
test_request_url_v2(self): request = webob.Request.blank('/v2/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_0(self): request = webob.Request.blank('/v2.0/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_1(self): request = webob.Request.blank('/v2.1/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_2(self): request = webob.Request.blank('/v2.2/images') self.middleware.process_request(request) self.assertEqual('/v2/images', request.path_info) def test_request_url_v2_3_unsupported(self): request = webob.Request.blank('/v2.3/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) def test_request_url_v4_unsupported(self): request = webob.Request.blank('/v4/images') resp = self.middleware.process_request(request) self.assertIsInstance(resp, versions.Controller) glance-12.0.0/glance/tests/unit/api/0000775000567000056710000000000012701407204020304 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/api/test_common.py0000664000567000056710000001305512701407047023216 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools import webob import glance.api.common from glance.common import config from glance.common import exception from glance.tests import utils as test_utils class SimpleIterator(object): def __init__(self, file_object, chunk_size): self.file_object = file_object self.chunk_size = chunk_size def __iter__(self): def read_chunk(): return self.fobj.read(self.chunk_size) chunk = read_chunk() while chunk: yield chunk chunk = read_chunk() else: raise StopIteration() class TestSizeCheckedIter(testtools.TestCase): def _get_image_metadata(self): return {'id': 'e31cb99c-fe89-49fb-9cc5-f5104fffa636'} def _get_webob_response(self): request = webob.Request.blank('/') response = webob.Response() response.request = request return response def test_uniform_chunk_size(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 4, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_small_last_chunk(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 3, ['AB', 'C'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('C', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_variable_chunk_size(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter( resp, meta, 6, ['AB', '', 'CDE', 'F'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('', next(checked_image)) self.assertEqual('CDE', next(checked_image)) self.assertEqual('F', next(checked_image)) self.assertRaises(StopIteration, next, checked_image) def test_too_many_chunks(self): """An image should streamed regardless of expected_size""" resp = self._get_webob_response() meta = self._get_image_metadata() 
checked_image = glance.api.common.size_checked_iter( resp, meta, 4, ['AB', 'CD', 'EF'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertEqual('EF', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_few_chunks(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 6, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_much_data(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 3, ['AB', 'CD'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) def test_too_little_data(self): resp = self._get_webob_response() meta = self._get_image_metadata() checked_image = glance.api.common.size_checked_iter(resp, meta, 6, ['AB', 'CD', 'E'], None) self.assertEqual('AB', next(checked_image)) self.assertEqual('CD', next(checked_image)) self.assertEqual('E', next(checked_image)) self.assertRaises(exception.GlanceException, next, checked_image) class TestMalformedRequest(test_utils.BaseTestCase): def setUp(self): """Establish a clean test environment""" super(TestMalformedRequest, self).setUp() self.config(flavor='', group='paste_deploy', config_file='etc/glance-api-paste.ini') self.api = config.load_paste_app('glance-api') def test_redirect_incomplete_url(self): """Test Glance redirects /v# to /v#/ with correct Location header""" req = webob.Request.blank('/v1.1') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPFound.code, res.status_int) self.assertEqual('http://localhost/v1/', res.location) 
glance-12.0.0/glance/tests/unit/api/test_cmd_cache_manage.py0000664000567000056710000004201312701407047025120 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import optparse import sys import mock import prettytable from six.moves import StringIO from glance.cmd import cache_manage from glance.common import exception import glance.common.utils import glance.image_cache.client from glance.tests import utils as test_utils @mock.patch('sys.stdout', mock.Mock()) class TestGlanceCmdManage(test_utils.BaseTestCase): @mock.patch.object(optparse.OptionParser, 'print_help') @mock.patch.object(optparse.OptionParser, 'parse_args') def test_help(self, mock_parse_args, mock_print_help): mock_parse_args.return_value = (optparse.Values(), ['help']) oparser = optparse.OptionParser() (options, command, args) = cache_manage.parse_options(oparser, ['help']) command(options, args) self.assertEqual(1, mock_print_help.call_count) @mock.patch.object(optparse.OptionParser, 'parse_args') def test_help_with_command(self, mock_parse_args): mock_parse_args.return_value = (optparse.Values(), ['help', 'list-cached']) oparser = optparse.OptionParser() (options, command, args) = cache_manage.parse_options(oparser, ['help', 'list-cached']) command(options, args) @mock.patch.object(sys, 'exit') @mock.patch.object(optparse.OptionParser, 'parse_args') def test_help_with_redundant_command(self, mock_parse_args, mock_exit): mock_parse_args.return_value = (optparse.Values(), ['help', 
'list-cached', "1"]) oparser = optparse.OptionParser() (options, command, args) = cache_manage.parse_options(oparser, ['help', 'list-cached', "1"]) command(options, args) self.assertEqual(1, mock_exit.call_count) @mock.patch.object(glance.image_cache.client.CacheClient, 'get_cached_images') @mock.patch.object(prettytable.PrettyTable, 'add_row') def test_list_cached_images(self, mock_row_create, mock_images): """ Verify that list_cached() method correctly processes images with all filled data and images with not filled 'last_accessed' field. """ mock_images.return_value = [ {'last_accessed': float(0), 'last_modified': float(1378985797.124511), 'image_id': '1', 'size': '128', 'hits': '1'}, {'last_accessed': float(1378985797.124511), 'last_modified': float(1378985797.124511), 'image_id': '2', 'size': '255', 'hits': '2'}] cache_manage.list_cached(mock.Mock(), '') self.assertEqual(len(mock_images.return_value), mock_row_create.call_count) @mock.patch.object(glance.image_cache.client.CacheClient, 'get_cached_images') def test_list_cached_images_empty(self, mock_images): """ Verify that list_cached() method handles a case when no images are cached without errors. """ mock_images.return_value = [] self.assertEqual(cache_manage.SUCCESS, cache_manage.list_cached(mock.Mock(), '')) @mock.patch.object(glance.image_cache.client.CacheClient, 'get_queued_images') @mock.patch.object(prettytable.PrettyTable, 'add_row') def test_list_queued_images(self, mock_row_create, mock_images): """Verify that list_queued() method correctly processes images.""" mock_images.return_value = [ {'image_id': '1'}, {'image_id': '2'}] cache_manage.list_queued(mock.Mock(), '') self.assertEqual(len(mock_images.return_value), mock_row_create.call_count) @mock.patch.object(glance.image_cache.client.CacheClient, 'get_queued_images') def test_list_queued_images_empty(self, mock_images): """ Verify that list_queued() method handles a case when no images were queued without errors. 
""" mock_images.return_value = [] self.assertEqual(cache_manage.SUCCESS, cache_manage.list_queued(mock.Mock(), '')) def test_queue_image_without_index(self): self.assertEqual(cache_manage.FAILURE, cache_manage.queue_image(mock.Mock(), [])) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_queue_image_not_forced_not_confirmed(self, mock_client, mock_confirm): # options.forced set to False and queue confirmation set to False. mock_confirm.return_value = False mock_options = mock.Mock() mock_options.force = False self.assertEqual(cache_manage.SUCCESS, cache_manage.queue_image(mock_options, ['img_id'])) self.assertFalse(mock_client.called) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_queue_image_not_forced_confirmed(self, mock_client, mock_confirm): # options.forced set to False and queue confirmation set to True. mock_confirm.return_value = True mock_options = mock.Mock() mock_options.force = False mock_options.verbose = True # to cover additional condition and line manager = mock.MagicMock() manager.attach_mock(mock_client, 'mock_client') self.assertEqual(cache_manage.SUCCESS, cache_manage.queue_image(mock_options, ['img_id'])) self.assertTrue(mock_client.called) self.assertIn( mock.call.mock_client().queue_image_for_caching('img_id'), manager.mock_calls) def test_delete_cached_image_without_index(self): self.assertEqual(cache_manage.FAILURE, cache_manage.delete_cached_image(mock.Mock(), [])) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_cached_image_not_forced_not_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to False. 
mock_confirm.return_value = False mock_options = mock.Mock() mock_options.force = False self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_cached_image(mock_options, ['img_id'])) self.assertFalse(mock_client.called) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_cached_image_not_forced_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to True. mock_confirm.return_value = True mock_options = mock.Mock() mock_options.force = False mock_options.verbose = True # to cover additional condition and line manager = mock.MagicMock() manager.attach_mock(mock_client, 'mock_client') self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_cached_image(mock_options, ['img_id'])) self.assertIn( mock.call.mock_client().delete_cached_image('img_id'), manager.mock_calls) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_cached_images_not_forced_not_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to False. mock_confirm.return_value = False mock_options = mock.Mock() mock_options.force = False self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_all_cached_images(mock_options, None)) self.assertFalse(mock_client.called) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_cached_images_not_forced_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to True. 
mock_confirm.return_value = True mock_options = mock.Mock() mock_options.force = False mock_options.verbose = True # to cover additional condition and line manager = mock.MagicMock() manager.attach_mock(mock_client, 'mock_client') self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_all_cached_images(mock_options, None)) self.assertTrue(mock_client.called) self.assertIn( mock.call.mock_client().delete_all_cached_images(), manager.mock_calls) def test_delete_queued_image_without_index(self): self.assertEqual(cache_manage.FAILURE, cache_manage.delete_queued_image(mock.Mock(), [])) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_queued_image_not_forced_not_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to False. mock_confirm.return_value = False mock_options = mock.Mock() mock_options.force = False self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_queued_image(mock_options, ['img_id'])) self.assertFalse(mock_client.called) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_queued_image_not_forced_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to True. 
mock_confirm.return_value = True mock_options = mock.Mock() mock_options.force = False mock_options.verbose = True # to cover additional condition and line manager = mock.MagicMock() manager.attach_mock(mock_client, 'mock_client') self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_queued_image(mock_options, ['img_id'])) self.assertTrue(mock_client.called) self.assertIn( mock.call.mock_client().delete_queued_image('img_id'), manager.mock_calls) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_queued_images_not_forced_not_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to False. mock_confirm.return_value = False mock_options = mock.Mock() mock_options.force = False self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_all_queued_images(mock_options, None)) self.assertFalse(mock_client.called) @mock.patch.object(glance.cmd.cache_manage, 'user_confirm') @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_delete_queued_images_not_forced_confirmed(self, mock_client, mock_confirm): # options.forced set to False and delete confirmation set to True. 
mock_confirm.return_value = True mock_options = mock.Mock() mock_options.force = False mock_options.verbose = True # to cover additional condition and line manager = mock.MagicMock() manager.attach_mock(mock_client, 'mock_client') self.assertEqual( cache_manage.SUCCESS, cache_manage.delete_all_queued_images(mock_options, None)) self.assertTrue(mock_client.called) self.assertIn( mock.call.mock_client().delete_all_queued_images(), manager.mock_calls) @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_catch_error_not_found(self, mock_function): mock_function.side_effect = exception.NotFound() self.assertEqual(cache_manage.FAILURE, cache_manage.list_cached(mock.Mock(), None)) @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_catch_error_forbidden(self, mock_function): mock_function.side_effect = exception.Forbidden() self.assertEqual(cache_manage.FAILURE, cache_manage.list_cached(mock.Mock(), None)) @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_catch_error_unhandled(self, mock_function): mock_function.side_effect = exception.Duplicate() my_mock = mock.Mock() my_mock.debug = False self.assertEqual(cache_manage.FAILURE, cache_manage.list_cached(my_mock, None)) @mock.patch.object(glance.cmd.cache_manage, 'get_client') def test_catch_error_unhandled_debug_mode(self, mock_function): mock_function.side_effect = exception.Duplicate() my_mock = mock.Mock() my_mock.debug = True self.assertRaises(exception.Duplicate, cache_manage.list_cached, my_mock, None) def test_cache_manage_env(self): def_value = 'sometext12345678900987654321' self.assertNotEqual(def_value, cache_manage.env('PATH', default=def_value)) def test_cache_manage_env_default(self): def_value = 'sometext12345678900987654321' self.assertEqual(def_value, cache_manage.env('TMPVALUE1234567890', default=def_value)) def test_create_option(self): oparser = optparse.OptionParser() cache_manage.create_options(oparser) self.assertTrue(len(oparser.option_list) > 0) 
@mock.patch.object(glance.cmd.cache_manage, 'lookup_command') def test_parse_options_no_parameters(self, mock_lookup): with mock.patch('sys.stdout', new_callable=StringIO): oparser = optparse.OptionParser() cache_manage.create_options(oparser) result = self.assertRaises(SystemExit, cache_manage.parse_options, oparser, []) self.assertEqual(0, result.code) self.assertFalse(mock_lookup.called) @mock.patch.object(optparse.OptionParser, 'print_usage') def test_parse_options_no_arguments(self, mock_printout): oparser = optparse.OptionParser() cache_manage.create_options(oparser) result = self.assertRaises(SystemExit, cache_manage.parse_options, oparser, ['-p', '1212']) self.assertEqual(0, result.code) self.assertTrue(mock_printout.called) @mock.patch.object(glance.cmd.cache_manage, 'lookup_command') def test_parse_options_retrieve_command(self, mock_lookup): mock_lookup.return_value = True oparser = optparse.OptionParser() cache_manage.create_options(oparser) (options, command, args) = cache_manage.parse_options(oparser, ['-p', '1212', 'list-cached']) self.assertTrue(command) def test_lookup_command_unsupported_command(self): self.assertRaises(SystemExit, cache_manage.lookup_command, mock.Mock(), 'unsupported_command') def test_lookup_command_supported_command(self): command = cache_manage.lookup_command(mock.Mock(), 'list-cached') self.assertEqual(cache_manage.list_cached, command) glance-12.0.0/glance/tests/unit/api/__init__.py0000664000567000056710000000000012701407047022410 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/api/test_property_protections.py0000664000567000056710000003325212701407047026244 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.api import policy from glance.api import property_protections from glance.common import exception from glance.common import property_utils import glance.domain from glance.tests import utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' class TestProtectedImageRepoProxy(utils.BaseTestCase): class ImageRepoStub(object): def __init__(self, fixtures): self.fixtures = fixtures def get(self, image_id): for f in self.fixtures: if f.image_id == image_id: return f else: raise ValueError(image_id) def list(self, *args, **kwargs): return self.fixtures def add(self, image): self.fixtures.append(image) def setUp(self): super(TestProtectedImageRepoProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer() self.property_rules = property_utils.PropertyRules(self.policy) self.image_factory = glance.domain.ImageFactory() extra_props = {'spl_create_prop': 'c', 'spl_read_prop': 'r', 'spl_update_prop': 'u', 'spl_delete_prop': 'd', 'forbidden': 'prop'} extra_props_2 = {'spl_read_prop': 'r', 'forbidden': 'prop'} self.fixtures = [ self.image_factory.new_image(image_id='1', owner=TENANT1, extra_properties=extra_props), self.image_factory.new_image(owner=TENANT2, visibility='public'), self.image_factory.new_image(image_id='3', owner=TENANT1, extra_properties=extra_props_2), ] self.context = glance.context.RequestContext(roles=['spl_role']) image_repo = self.ImageRepoStub(self.fixtures) self.image_repo = property_protections.ProtectedImageRepoProxy( image_repo, self.context, self.property_rules) def 
test_get_image(self): image_id = '1' result_image = self.image_repo.get(image_id) result_extra_props = result_image.extra_properties self.assertEqual('c', result_extra_props['spl_create_prop']) self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertEqual('u', result_extra_props['spl_update_prop']) self.assertEqual('d', result_extra_props['spl_delete_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) def test_list_image(self): result_images = self.image_repo.list() self.assertEqual(3, len(result_images)) result_extra_props = result_images[0].extra_properties self.assertEqual('c', result_extra_props['spl_create_prop']) self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertEqual('u', result_extra_props['spl_update_prop']) self.assertEqual('d', result_extra_props['spl_delete_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) result_extra_props = result_images[1].extra_properties self.assertEqual({}, result_extra_props) result_extra_props = result_images[2].extra_properties self.assertEqual('r', result_extra_props['spl_read_prop']) self.assertNotIn('forbidden', result_extra_props.keys()) class TestProtectedImageProxy(utils.BaseTestCase): def setUp(self): super(TestProtectedImageProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer() self.property_rules = property_utils.PropertyRules(self.policy) class ImageStub(object): def __init__(self, extra_prop): self.extra_properties = extra_prop def test_read_image_with_extra_prop(self): context = glance.context.RequestContext(roles=['spl_role']) extra_prop = {'spl_read_prop': 'read', 'spl_fake_prop': 'prop'} image = self.ImageStub(extra_prop) result_image = property_protections.ProtectedImageProxy( image, context, self.property_rules) result_extra_props = result_image.extra_properties self.assertEqual('read', result_extra_props['spl_read_prop']) self.assertNotIn('spl_fake_prop', result_extra_props.keys()) class 
TestExtraPropertiesProxy(utils.BaseTestCase): def setUp(self): super(TestExtraPropertiesProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer() self.property_rules = property_utils.PropertyRules(self.policy) def test_read_extra_property_as_admin_role(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) test_result = extra_prop_proxy['foo'] self.assertEqual('bar', test_result) def test_read_extra_property_as_unpermitted_role(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['unpermitted_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') def test_update_extra_property_as_permitted_role_after_read(self): extra_properties = {'foo': 'bar', 'ping': 'pong'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['foo'] = 'par' self.assertEqual('par', extra_prop_proxy['foo']) def test_update_extra_property_as_unpermitted_role_after_read(self): extra_properties = {'spl_read_prop': 'bar'} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__setitem__, 'spl_read_prop', 'par') def test_update_reserved_extra_property(self): extra_properties = {'spl_create_prop': 'bar'} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, 
extra_prop_proxy.__setitem__, 'spl_create_prop', 'par') def test_update_empty_extra_property(self): extra_properties = {'foo': ''} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['foo'] = 'bar' self.assertEqual('bar', extra_prop_proxy['foo']) def test_create_extra_property_admin(self): extra_properties = {} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) extra_prop_proxy['boo'] = 'doo' self.assertEqual('doo', extra_prop_proxy['boo']) def test_create_reserved_extra_property(self): extra_properties = {} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__setitem__, 'boo', 'doo') def test_delete_extra_property_as_admin_role(self): extra_properties = {'foo': 'bar'} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) del extra_prop_proxy['foo'] self.assertRaises(KeyError, extra_prop_proxy.__getitem__, 'foo') def test_delete_nonexistant_extra_property_as_admin_role(self): extra_properties = {} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) self.assertRaises(KeyError, extra_prop_proxy.__delitem__, 'foo') def test_delete_reserved_extra_property(self): extra_properties = {'spl_read_prop': 'r'} context = glance.context.RequestContext(roles=['spl_role']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) # Ensure property has been created and can be read 
self.assertEqual('r', extra_prop_proxy['spl_read_prop']) self.assertRaises(exception.ReservedProperty, extra_prop_proxy.__delitem__, 'spl_read_prop') def test_delete_nonexistant_extra_property(self): extra_properties = {} roles = ['spl_role'] extra_prop_proxy = property_protections.ExtraPropertiesProxy( roles, extra_properties, self.property_rules) self.assertRaises(KeyError, extra_prop_proxy.__delitem__, 'spl_read_prop') def test_delete_empty_extra_property(self): extra_properties = {'foo': ''} context = glance.context.RequestContext(roles=['admin']) extra_prop_proxy = property_protections.ExtraPropertiesProxy( context, extra_properties, self.property_rules) del extra_prop_proxy['foo'] self.assertNotIn('foo', extra_prop_proxy) class TestProtectedImageFactoryProxy(utils.BaseTestCase): def setUp(self): super(TestProtectedImageFactoryProxy, self).setUp() self.set_property_protections() self.policy = policy.Enforcer() self.property_rules = property_utils.PropertyRules(self.policy) self.factory = glance.domain.ImageFactory() def test_create_image_no_extra_prop(self): self.context = glance.context.RequestContext(tenant=TENANT1, roles=['spl_role']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {} image = self.image_factory.new_image(extra_properties=extra_props) expected_extra_props = {} self.assertEqual(expected_extra_props, image.extra_properties) def test_create_image_extra_prop(self): self.context = glance.context.RequestContext(tenant=TENANT1, roles=['spl_role']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {'spl_create_prop': 'c'} image = self.image_factory.new_image(extra_properties=extra_props) expected_extra_props = {'spl_create_prop': 'c'} self.assertEqual(expected_extra_props, image.extra_properties) def test_create_image_extra_prop_reserved_property(self): self.context = 
glance.context.RequestContext(tenant=TENANT1, roles=['spl_role']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} # no reg ex for property 'foo' is mentioned for spl_role in config self.assertRaises(exception.ReservedProperty, self.image_factory.new_image, extra_properties=extra_props) def test_create_image_extra_prop_admin(self): self.context = glance.context.RequestContext(tenant=TENANT1, roles=['admin']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} image = self.image_factory.new_image(extra_properties=extra_props) expected_extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} self.assertEqual(expected_extra_props, image.extra_properties) def test_create_image_extra_prop_invalid_role(self): self.context = glance.context.RequestContext(tenant=TENANT1, roles=['imaginary-role']) self.image_factory = property_protections.ProtectedImageFactoryProxy( self.factory, self.context, self.property_rules) extra_props = {'foo': 'bar', 'spl_create_prop': 'c'} self.assertRaises(exception.ReservedProperty, self.image_factory.new_image, extra_properties=extra_props) glance-12.0.0/glance/tests/unit/api/middleware/0000775000567000056710000000000012701407204022421 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/api/middleware/test_cache_manage.py0000664000567000056710000001430212701407047026412 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.api import cached_images from glance.api.middleware import cache_manage import glance.common.config import glance.common.wsgi import glance.image_cache from glance.tests import utils as test_utils import mock import webob class TestCacheManageFilter(test_utils.BaseTestCase): @mock.patch.object(glance.image_cache.ImageCache, "init_driver") def setUp(self, mock_init_driver): super(TestCacheManageFilter, self).setUp() self.stub_application_name = "stubApplication" self.stub_value = "Stub value" self.image_id = "image_id_stub" mock_init_driver.return_value = None self.cache_manage_filter = cache_manage.CacheManageFilter( self.stub_application_name) def test_bogus_request(self): # prepare bogus_request = webob.Request.blank("/bogus/") # call resource = self.cache_manage_filter.process_request(bogus_request) # check self.assertIsNone(resource) @mock.patch.object(cached_images.Controller, "get_cached_images") def test_get_cached_images(self, mock_get_cached_images): # setup mock_get_cached_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/cached_images") # call resource = self.cache_manage_filter.process_request(request) # check mock_get_cached_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "delete_cached_image") def test_delete_cached_image(self, mock_delete_cached_image): # setup mock_delete_cached_image.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/cached_images/" + self.image_id, 
environ={'REQUEST_METHOD': "DELETE"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_cached_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "delete_cached_images") def test_delete_cached_images(self, mock_delete_cached_images): # setup mock_delete_cached_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/cached_images", environ={'REQUEST_METHOD': "DELETE"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_cached_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "queue_image") def test_put_queued_image(self, mock_queue_image): # setup mock_queue_image.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/queued_images/" + self.image_id, environ={'REQUEST_METHOD': "PUT"}) # call resource = self.cache_manage_filter.process_request(request) # check mock_queue_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "get_queued_images") def test_get_queued_images(self, mock_get_queued_images): # setup mock_get_queued_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/queued_images") # call resource = self.cache_manage_filter.process_request(request) # check mock_get_queued_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "delete_queued_image") def test_delete_queued_image(self, mock_delete_queued_image): # setup mock_delete_queued_image.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/queued_images/" + self.image_id, 
environ={'REQUEST_METHOD': 'DELETE'}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_queued_image.assert_called_with(request, image_id=self.image_id) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) @mock.patch.object(cached_images.Controller, "delete_queued_images") def test_delete_queued_images(self, mock_delete_queued_images): # setup mock_delete_queued_images.return_value = self.stub_value # prepare request = webob.Request.blank("/v1/queued_images", environ={'REQUEST_METHOD': 'DELETE'}) # call resource = self.cache_manage_filter.process_request(request) # check mock_delete_queued_images.assert_called_with(request) self.assertEqual('"' + self.stub_value + '"', resource.body.decode('utf-8')) glance-12.0.0/glance/tests/unit/api/middleware/__init__.py0000664000567000056710000000000012701407047024525 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/api/test_cmd.py0000664000567000056710000001330612701407047022470 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import glance_store as store import mock from oslo_config import cfg from oslo_log import log as logging import six import glance.cmd.api import glance.cmd.cache_cleaner import glance.cmd.cache_pruner import glance.common.config from glance.common import exception as exc import glance.common.wsgi import glance.image_cache.cleaner import glance.image_cache.pruner from glance.tests import utils as test_utils CONF = cfg.CONF class TestGlanceApiCmd(test_utils.BaseTestCase): __argv_backup = None def _do_nothing(self, *args, **kwargs): pass def _raise(self, exc): def fake(*args, **kwargs): raise exc return fake def setUp(self): super(TestGlanceApiCmd, self).setUp() self.__argv_backup = sys.argv sys.argv = ['glance-api'] self.stderr = six.StringIO() sys.stderr = self.stderr store.register_opts(CONF) self.stubs.Set(glance.common.config, 'load_paste_app', self._do_nothing) self.stubs.Set(glance.common.wsgi.Server, 'start', self._do_nothing) self.stubs.Set(glance.common.wsgi.Server, 'wait', self._do_nothing) def tearDown(self): sys.stderr = sys.__stderr__ sys.argv = self.__argv_backup super(TestGlanceApiCmd, self).tearDown() def test_supported_default_store(self): self.config(group='glance_store', default_store='file') glance.cmd.api.main() def test_unsupported_default_store(self): self.stubs.UnsetAll() self.config(group='glance_store', default_store='shouldnotexist') exit = self.assertRaises(SystemExit, glance.cmd.api.main) self.assertEqual(1, exit.code) def test_worker_creation_failure(self): failure = exc.WorkerCreationFailure(reason='test') self.stubs.Set(glance.common.wsgi.Server, 'start', self._raise(failure)) exit = self.assertRaises(SystemExit, glance.cmd.api.main) self.assertEqual(2, exit.code) @mock.patch.object(glance.common.config, 'parse_cache_args') @mock.patch.object(logging, 'setup') @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') @mock.patch.object(glance.image_cache.ImageCache, 'clean') def test_cache_cleaner_main(self, 
mock_cache_clean, mock_cache_init_driver, mock_log_setup, mock_parse_config): mock_cache_init_driver.return_value = None manager = mock.MagicMock() manager.attach_mock(mock_log_setup, 'mock_log_setup') manager.attach_mock(mock_parse_config, 'mock_parse_config') manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') manager.attach_mock(mock_cache_clean, 'mock_cache_clean') glance.cmd.cache_cleaner.main() expected_call_sequence = [mock.call.mock_parse_config(), mock.call.mock_log_setup(CONF, 'glance'), mock.call.mock_cache_init_driver(), mock.call.mock_cache_clean()] self.assertEqual(expected_call_sequence, manager.mock_calls) @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') def test_cache_cleaner_main_runtime_exception_handling(self, mock_cache): mock_cache.return_value = None self.stubs.Set(glance.image_cache.cleaner.Cleaner, 'run', self._raise(RuntimeError)) exit = self.assertRaises(SystemExit, glance.cmd.cache_cleaner.main) self.assertEqual('ERROR: ', exit.code) @mock.patch.object(glance.common.config, 'parse_cache_args') @mock.patch.object(logging, 'setup') @mock.patch.object(glance.image_cache.ImageCache, 'init_driver') @mock.patch.object(glance.image_cache.ImageCache, 'prune') def test_cache_pruner_main(self, mock_cache_prune, mock_cache_init_driver, mock_log_setup, mock_parse_config): mock_cache_init_driver.return_value = None manager = mock.MagicMock() manager.attach_mock(mock_log_setup, 'mock_log_setup') manager.attach_mock(mock_parse_config, 'mock_parse_config') manager.attach_mock(mock_cache_init_driver, 'mock_cache_init_driver') manager.attach_mock(mock_cache_prune, 'mock_cache_prune') glance.cmd.cache_pruner.main() expected_call_sequence = [mock.call.mock_parse_config(), mock.call.mock_log_setup(CONF, 'glance'), mock.call.mock_cache_init_driver(), mock.call.mock_cache_prune()] self.assertEqual(expected_call_sequence, manager.mock_calls) @mock.patch.object(glance.image_cache.base.CacheApp, '__init__') def 
test_cache_pruner_main_runtime_exception_handling(self, mock_cache): mock_cache.return_value = None self.stubs.Set(glance.image_cache.pruner.Pruner, 'run', self._raise(RuntimeError)) exit = self.assertRaises(SystemExit, glance.cmd.cache_pruner.main) self.assertEqual('ERROR: ', exit.code) glance-12.0.0/glance/tests/unit/test_glance_replicator.py0000664000567000056710000005331212701407047024632 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import os import sys import uuid import fixtures import mock from oslo_serialization import jsonutils import six from six import moves import webob from glance.cmd import replicator as glance_replicator from glance.common import exception from glance.tests.unit import utils as unit_test_utils from glance.tests import utils as test_utils IMG_RESPONSE_ACTIVE = { 'content-length': '0', 'property-image_state': 'available', 'min_ram': '0', 'disk_format': 'aki', 'updated_at': '2012-06-25T02:10:36', 'date': 'Thu, 28 Jun 2012 07:20:05 GMT', 'owner': '8aef75b5c0074a59aa99188fdb4b9e90', 'id': '6d55dd55-053a-4765-b7bc-b30df0ea3861', 'size': '4660272', 'property-image_location': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-' 'vmlinuz-generic.manifest.xml', 'property-architecture': 'x86_64', 'etag': 'f46cfe7fb3acaff49a3567031b9b53bb', 'location': 'http://127.0.0.1:9292/v1/images/' '6d55dd55-053a-4765-b7bc-b30df0ea3861', 'container_format': 'aki', 'status': 'active', 'deleted': 'False', 'min_disk': '0', 'is_public': 'False', 'name': 'ubuntu-bucket/oneiric-server-cloudimg-amd64-vmlinuz-generic', 'checksum': 'f46cfe7fb3acaff49a3567031b9b53bb', 'created_at': '2012-06-25T02:10:32', 'protected': 'False', 'content-type': 'text/html; charset=UTF-8' } IMG_RESPONSE_QUEUED = copy.copy(IMG_RESPONSE_ACTIVE) IMG_RESPONSE_QUEUED['status'] = 'queued' IMG_RESPONSE_QUEUED['id'] = '49b2c782-ee10-4692-84f8-3942e9432c4b' IMG_RESPONSE_QUEUED['location'] = ('http://127.0.0.1:9292/v1/images/' + IMG_RESPONSE_QUEUED['id']) class FakeHTTPConnection(object): def __init__(self): self.count = 0 self.reqs = {} self.last_req = None self.host = 'localhost' self.port = 9292 def prime_request(self, method, url, in_body, in_headers, out_code, out_body, out_headers): if not url.startswith('/'): url = '/' + url url = unit_test_utils.sort_url_by_qs_keys(url) hkeys = sorted(in_headers.keys()) hashable = (method, url, in_body, ' '.join(hkeys)) flat_headers = [] for key in out_headers: 
flat_headers.append((key, out_headers[key])) self.reqs[hashable] = (out_code, out_body, flat_headers) def request(self, method, url, body, headers): self.count += 1 url = unit_test_utils.sort_url_by_qs_keys(url) hkeys = sorted(headers.keys()) hashable = (method, url, body, ' '.join(hkeys)) if hashable not in self.reqs: options = [] for h in self.reqs: options.append(repr(h)) raise Exception('No such primed request: %s "%s"\n' '%s\n\n' 'Available:\n' '%s' % (method, url, hashable, '\n\n'.join(options))) self.last_req = hashable def getresponse(self): class FakeResponse(object): def __init__(self, args): (code, body, headers) = args self.body = six.StringIO(body) self.headers = headers self.status = code def read(self, count=1000000): return self.body.read(count) def getheaders(self): return self.headers return FakeResponse(self.reqs[self.last_req]) class ImageServiceTestCase(test_utils.BaseTestCase): def test_rest_errors(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') for code, exc in [(400, webob.exc.HTTPBadRequest), (401, webob.exc.HTTPUnauthorized), (403, webob.exc.HTTPForbidden), (409, webob.exc.HTTPConflict), (500, webob.exc.HTTPInternalServerError)]: c.conn.prime_request('GET', ('v1/images/' '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'), '', {'x-auth-token': 'noauth'}, code, '', {}) self.assertRaises(exc, c.get_image, '5dcddce0-cba5-4f18-9cf4-9853c7b207a6') def test_rest_get_images(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') # Two images, one of which is queued resp = {'images': [IMG_RESPONSE_ACTIVE, IMG_RESPONSE_QUEUED]} c.conn.prime_request('GET', 'v1/images/detail?is_public=None', '', {'x-auth-token': 'noauth'}, 200, jsonutils.dumps(resp), {}) c.conn.prime_request('GET', ('v1/images/detail?marker=%s&is_public=None' % IMG_RESPONSE_QUEUED['id']), '', {'x-auth-token': 'noauth'}, 200, jsonutils.dumps({'images': []}), {}) imgs = list(c.get_images()) self.assertEqual(2, len(imgs)) self.assertEqual(2, c.conn.count) 
def test_rest_get_image(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_contents = 'THISISTHEIMAGEBODY' c.conn.prime_request('GET', 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], '', {'x-auth-token': 'noauth'}, 200, image_contents, IMG_RESPONSE_ACTIVE) body = c.get_image(IMG_RESPONSE_ACTIVE['id']) self.assertEqual(image_contents, body.read()) def test_rest_header_list_to_dict(self): i = [('x-image-meta-banana', 42), ('gerkin', 12), ('x-image-meta-property-frog', 11), ('x-image-meta-property-duck', 12)] o = glance_replicator.ImageService._header_list_to_dict(i) self.assertIn('banana', o) self.assertIn('gerkin', o) self.assertIn('properties', o) self.assertIn('frog', o['properties']) self.assertIn('duck', o['properties']) self.assertNotIn('x-image-meta-banana', o) def test_rest_get_image_meta(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') c.conn.prime_request('HEAD', 'v1/images/%s' % IMG_RESPONSE_ACTIVE['id'], '', {'x-auth-token': 'noauth'}, 200, '', IMG_RESPONSE_ACTIVE) header = c.get_image_meta(IMG_RESPONSE_ACTIVE['id']) self.assertIn('id', header) def test_rest_dict_to_headers(self): i = {'banana': 42, 'gerkin': 12, 'properties': {'frog': 1, 'kernel_id': None} } o = glance_replicator.ImageService._dict_to_headers(i) self.assertIn('x-image-meta-banana', o) self.assertIn('x-image-meta-gerkin', o) self.assertIn('x-image-meta-property-frog', o) self.assertIn('x-image-meta-property-kernel_id', o) self.assertEqual(o['x-image-meta-property-kernel_id'], '') self.assertNotIn('properties', o) def test_rest_add_image(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_body = 'THISISANIMAGEBODYFORSURE!' 
image_meta_with_proto = { 'x-auth-token': 'noauth', 'Content-Type': 'application/octet-stream', 'Content-Length': len(image_body) } for key in IMG_RESPONSE_ACTIVE: image_meta_with_proto[ 'x-image-meta-%s' % key] = IMG_RESPONSE_ACTIVE[key] c.conn.prime_request('POST', 'v1/images', image_body, image_meta_with_proto, 200, '', IMG_RESPONSE_ACTIVE) headers, body = c.add_image(IMG_RESPONSE_ACTIVE, image_body) self.assertEqual(IMG_RESPONSE_ACTIVE, headers) self.assertEqual(1, c.conn.count) def test_rest_add_image_meta(self): c = glance_replicator.ImageService(FakeHTTPConnection(), 'noauth') image_meta = {'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'} image_meta_headers = glance_replicator.ImageService._dict_to_headers( image_meta) image_meta_headers['x-auth-token'] = 'noauth' image_meta_headers['Content-Type'] = 'application/octet-stream' c.conn.prime_request('PUT', 'v1/images/%s' % image_meta['id'], '', image_meta_headers, 200, '', '') headers, body = c.add_image_meta(image_meta) class FakeHttpResponse(object): def __init__(self, headers, data): self.headers = headers self.data = six.BytesIO(data) def getheaders(self): return self.headers def read(self, amt=None): return self.data.read(amt) FAKEIMAGES = [{'status': 'active', 'size': 100, 'dontrepl': 'banana', 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'}, {'status': 'deleted', 'size': 200, 'dontrepl': 'banana', 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b'}, {'status': 'active', 'size': 300, 'dontrepl': 'banana', 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db'}] FAKEIMAGES_LIVEMASTER = [{'status': 'active', 'size': 100, 'dontrepl': 'banana', 'id': '5dcddce0-cba5-4f18-9cf4-9853c7b207a6'}, {'status': 'deleted', 'size': 200, 'dontrepl': 'banana', 'id': 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b'}, {'status': 'deleted', 'size': 300, 'dontrepl': 'banana', 'id': '37ff82db-afca-48c7-ae0b-ddc7cf83e3db'}, {'status': 'active', 'size': 100, 'dontrepl': 'banana', 'id': '15648dd7-8dd0-401c-bd51-550e1ba9a088'}] class FakeImageService(object): 
def __init__(self, http_conn, authtoken): self.authtoken = authtoken def get_images(self): if self.authtoken == 'livemastertoken': return FAKEIMAGES_LIVEMASTER return FAKEIMAGES def get_image(self, id): return FakeHttpResponse({}, b'data') def get_image_meta(self, id): for img in FAKEIMAGES: if img['id'] == id: return img return {} def add_image_meta(self, meta): return {'status': 200}, None def add_image(self, meta, data): return {'status': 200}, None def get_image_service(): return FakeImageService def check_no_args(command, args): options = moves.UserDict() no_args_error = False orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service command(options, args) except TypeError: no_args_error = True finally: glance_replicator.get_image_service = orig_img_service return no_args_error def check_bad_args(command, args): options = moves.UserDict() bad_args_error = False orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service command(options, args) except ValueError: bad_args_error = True finally: glance_replicator.get_image_service = orig_img_service return bad_args_error class ReplicationCommandsTestCase(test_utils.BaseTestCase): @mock.patch.object(glance_replicator, 'lookup_command') def test_help(self, mock_lookup_command): option = mock.Mock() mock_lookup_command.return_value = "fake_return" glance_replicator.print_help(option, []) glance_replicator.print_help(option, ['dump']) glance_replicator.print_help(option, ['fake_command']) self.assertEqual(2, mock_lookup_command.call_count) def test_replication_size(self): options = moves.UserDict() options.slavetoken = 'slavetoken' args = ['localhost:9292'] stdout = sys.stdout orig_img_service = glance_replicator.get_image_service sys.stdout = six.StringIO() try: glance_replicator.get_image_service = get_image_service glance_replicator.replication_size(options, args) sys.stdout.seek(0) output = 
sys.stdout.read() finally: sys.stdout = stdout glance_replicator.get_image_service = orig_img_service output = output.rstrip() self.assertEqual('Total size is 400 bytes across 2 images', output) def test_replication_size_with_no_args(self): args = [] command = glance_replicator.replication_size self.assertTrue(check_no_args(command, args)) def test_replication_size_with_bad_args(self): args = ['aaa'] command = glance_replicator.replication_size self.assertTrue(check_bad_args(command, args)) def test_replication_dump(self): tempdir = self.useFixture(fixtures.TempDir()).path options = moves.UserDict() options.chunksize = 4096 options.mastertoken = 'mastertoken' options.metaonly = False args = ['localhost:9292', tempdir] orig_img_service = glance_replicator.get_image_service self.addCleanup(setattr, glance_replicator, 'get_image_service', orig_img_service) glance_replicator.get_image_service = get_image_service glance_replicator.replication_dump(options, args) for active in ['5dcddce0-cba5-4f18-9cf4-9853c7b207a6', '37ff82db-afca-48c7-ae0b-ddc7cf83e3db']: imgfile = os.path.join(tempdir, active) self.assertTrue(os.path.exists(imgfile)) self.assertTrue(os.path.exists('%s.img' % imgfile)) with open(imgfile) as f: d = jsonutils.loads(f.read()) self.assertIn('status', d) self.assertIn('id', d) self.assertIn('size', d) for inactive in ['f4da1d2a-40e8-4710-b3aa-0222a4cc887b']: imgfile = os.path.join(tempdir, inactive) self.assertTrue(os.path.exists(imgfile)) self.assertFalse(os.path.exists('%s.img' % imgfile)) with open(imgfile) as f: d = jsonutils.loads(f.read()) self.assertIn('status', d) self.assertIn('id', d) self.assertIn('size', d) def test_replication_dump_with_no_args(self): args = [] command = glance_replicator.replication_dump self.assertTrue(check_no_args(command, args)) def test_replication_dump_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_dump self.assertTrue(check_bad_args(command, args)) def test_replication_load(self): 
tempdir = self.useFixture(fixtures.TempDir()).path def write_image(img, data): imgfile = os.path.join(tempdir, img['id']) with open(imgfile, 'w') as f: f.write(jsonutils.dumps(img)) if data: with open('%s.img' % imgfile, 'w') as f: f.write(data) for img in FAKEIMAGES: cimg = copy.copy(img) # We need at least one image where the stashed metadata on disk # is newer than what the fake has if cimg['id'] == '5dcddce0-cba5-4f18-9cf4-9853c7b207a6': cimg['extra'] = 'thisissomeextra' # This is an image where the metadata change should be ignored if cimg['id'] == 'f4da1d2a-40e8-4710-b3aa-0222a4cc887b': cimg['dontrepl'] = 'thisisyetmoreextra' write_image(cimg, 'kjdhfkjshdfkjhsdkfd') # And an image which isn't on the destination at all new_id = str(uuid.uuid4()) cimg['id'] = new_id write_image(cimg, 'dskjfhskjhfkfdhksjdhf') # And an image which isn't on the destination, but lacks image # data new_id_missing_data = str(uuid.uuid4()) cimg['id'] = new_id_missing_data write_image(cimg, None) # A file which should be ignored badfile = os.path.join(tempdir, 'kjdfhf') with open(badfile, 'w') as f: f.write(jsonutils.dumps([1, 2, 3, 4, 5])) # Finally, we're ready to test options = moves.UserDict() options.dontreplicate = 'dontrepl dontreplabsent' options.slavetoken = 'slavetoken' args = ['localhost:9292', tempdir] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service updated = glance_replicator.replication_load(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertIn('5dcddce0-cba5-4f18-9cf4-9853c7b207a6', updated) self.assertNotIn('f4da1d2a-40e8-4710-b3aa-0222a4cc887b', updated) self.assertIn(new_id, updated) self.assertNotIn(new_id_missing_data, updated) def test_replication_load_with_no_args(self): args = [] command = glance_replicator.replication_load self.assertTrue(check_no_args(command, args)) def test_replication_load_with_bad_args(self): args = ['aaa', 'bbb'] command = 
glance_replicator.replication_load self.assertTrue(check_bad_args(command, args)) def test_replication_livecopy(self): options = moves.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' options.mastertoken = 'livemastertoken' options.slavetoken = 'liveslavetoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service updated = glance_replicator.replication_livecopy(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertEqual(2, len(updated)) def test_replication_livecopy_with_no_args(self): args = [] command = glance_replicator.replication_livecopy self.assertTrue(check_no_args(command, args)) def test_replication_livecopy_with_bad_args(self): args = ['aaa', 'bbb'] command = glance_replicator.replication_livecopy self.assertTrue(check_bad_args(command, args)) def test_replication_compare(self): options = moves.UserDict() options.chunksize = 4096 options.dontreplicate = 'dontrepl dontreplabsent' options.mastertoken = 'livemastertoken' options.slavetoken = 'liveslavetoken' options.metaonly = False args = ['localhost:9292', 'localhost:9393'] orig_img_service = glance_replicator.get_image_service try: glance_replicator.get_image_service = get_image_service differences = glance_replicator.replication_compare(options, args) finally: glance_replicator.get_image_service = orig_img_service self.assertIn('15648dd7-8dd0-401c-bd51-550e1ba9a088', differences) self.assertEqual(differences['15648dd7-8dd0-401c-bd51-550e1ba9a088'], 'missing') self.assertIn('37ff82db-afca-48c7-ae0b-ddc7cf83e3db', differences) self.assertEqual(differences['37ff82db-afca-48c7-ae0b-ddc7cf83e3db'], 'diff') def test_replication_compare_with_no_args(self): args = [] command = glance_replicator.replication_compare self.assertTrue(check_no_args(command, args)) def test_replication_compare_with_bad_args(self): 
args = ['aaa', 'bbb'] command = glance_replicator.replication_compare self.assertTrue(check_bad_args(command, args)) class ReplicationUtilitiesTestCase(test_utils.BaseTestCase): def test_check_upload_response_headers(self): glance_replicator._check_upload_response_headers({'status': 'active'}, None) d = {'image': {'status': 'active'}} glance_replicator._check_upload_response_headers({}, jsonutils.dumps(d)) self.assertRaises( exception.UploadException, glance_replicator._check_upload_response_headers, {}, None) def test_image_present(self): client = FakeImageService(None, 'noauth') self.assertTrue(glance_replicator._image_present( client, '5dcddce0-cba5-4f18-9cf4-9853c7b207a6')) self.assertFalse(glance_replicator._image_present( client, uuid.uuid4())) def test_dict_diff(self): a = {'a': 1, 'b': 2, 'c': 3} b = {'a': 1, 'b': 2} c = {'a': 1, 'b': 1, 'c': 3} d = {'a': 1, 'b': 2, 'c': 3, 'd': 4} # Only things that the first dict has which the second dict doesn't # matter here. self.assertFalse(glance_replicator._dict_diff(a, a)) self.assertTrue(glance_replicator._dict_diff(a, b)) self.assertTrue(glance_replicator._dict_diff(a, c)) self.assertFalse(glance_replicator._dict_diff(a, d)) glance-12.0.0/glance/tests/unit/v2/0000775000567000056710000000000012701407204020062 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/v2/test_schemas_resource.py0000664000567000056710000000516212701407047025036 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import glance.api.v2.schemas import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils class TestSchemasController(test_utils.BaseTestCase): def setUp(self): super(TestSchemasController, self).setUp() self.controller = glance.api.v2.schemas.Controller() def test_image(self): req = unit_test_utils.get_fake_request() output = self.controller.image(req) self.assertEqual('image', output['name']) expected = set(['status', 'name', 'tags', 'checksum', 'created_at', 'disk_format', 'updated_at', 'visibility', 'self', 'file', 'container_format', 'schema', 'id', 'size', 'direct_url', 'min_ram', 'min_disk', 'protected', 'locations', 'owner', 'virtual_size']) self.assertEqual(expected, set(output['properties'].keys())) def test_images(self): req = unit_test_utils.get_fake_request() output = self.controller.images(req) self.assertEqual('images', output['name']) expected = set(['images', 'schema', 'first', 'next']) self.assertEqual(expected, set(output['properties'].keys())) expected = set(['{schema}', '{first}', '{next}']) actual = set([link['href'] for link in output['links']]) self.assertEqual(expected, actual) def test_member(self): req = unit_test_utils.get_fake_request() output = self.controller.member(req) self.assertEqual('member', output['name']) expected = set(['status', 'created_at', 'updated_at', 'image_id', 'member_id', 'schema']) self.assertEqual(expected, set(output['properties'].keys())) def test_members(self): req = unit_test_utils.get_fake_request() output = self.controller.members(req) self.assertEqual('members', output['name']) expected = set(['schema', 'members']) self.assertEqual(expected, set(output['properties'].keys())) glance-12.0.0/glance/tests/unit/v2/test_image_actions_resource.py0000664000567000056710000001451612701407047026220 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store as store import webob import glance.api.v2.image_actions as image_actions import glance.context from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils BASE_URI = unit_test_utils.BASE_URI USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'is_public': False, 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj class TestImageActionsController(base.IsolatedUnitTest): def setUp(self): super(TestImageActionsController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() for i in range(1, 4): self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) self.store_utils = unit_test_utils.FakeStoreUtils(self.store) self.controller = image_actions.ImageActionsController( self.db, self.policy, self.notifier, self.store) self.controller.gateway.store_utils = self.store_utils 
store.create_stores() def _get_fake_context(self, user=USER1, tenant=TENANT1, roles=None, is_admin=False): if roles is None: roles = ['member'] kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': is_admin, } context = glance.context.RequestContext(**kwargs) return context def _create_image(self, status): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, name='1', size=256, virtual_size=1024, is_public=True, locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}], disk_format='raw', container_format='bare', status=status), ] context = self._get_fake_context() [self.db.image_create(context, image) for image in self.images] def test_deactivate_from_active(self): self._create_image('active') request = unit_test_utils.get_fake_request() self.controller.deactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('deactivated', image['status']) def test_deactivate_from_deactivated(self): self._create_image('deactivated') request = unit_test_utils.get_fake_request() self.controller.deactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('deactivated', image['status']) def _test_deactivate_from_wrong_status(self, status): # deactivate will yield an error if the initial status is anything # other than 'active' or 'deactivated' self._create_image(status) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.deactivate, request, UUID1) def test_deactivate_from_queued(self): self._test_deactivate_from_wrong_status('queued') def test_deactivate_from_saving(self): self._test_deactivate_from_wrong_status('saving') def test_deactivate_from_killed(self): self._test_deactivate_from_wrong_status('killed') def test_deactivate_from_pending_delete(self): self._test_deactivate_from_wrong_status('pending_delete') def test_deactivate_from_deleted(self): self._test_deactivate_from_wrong_status('deleted') def 
test_reactivate_from_active(self): self._create_image('active') request = unit_test_utils.get_fake_request() self.controller.reactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('active', image['status']) def test_reactivate_from_deactivated(self): self._create_image('deactivated') request = unit_test_utils.get_fake_request() self.controller.reactivate(request, UUID1) image = self.db.image_get(request.context, UUID1) self.assertEqual('active', image['status']) def _test_reactivate_from_wrong_status(self, status): # reactivate will yield an error if the initial status is anything # other than 'active' or 'deactivated' self._create_image(status) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.reactivate, request, UUID1) def test_reactivate_from_queued(self): self._test_reactivate_from_wrong_status('queued') def test_reactivate_from_saving(self): self._test_reactivate_from_wrong_status('saving') def test_reactivate_from_killed(self): self._test_reactivate_from_wrong_status('killed') def test_reactivate_from_pending_delete(self): self._test_reactivate_from_wrong_status('pending_delete') def test_reactivate_from_deleted(self): self._test_reactivate_from_wrong_status('deleted') glance-12.0.0/glance/tests/unit/v2/test_image_tags_resource.py0000664000567000056710000001005012701407047025503 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import webob import glance.api.v2.image_tags from glance.common import exception from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.unit.v2.test_image_data_resource as image_data_tests import glance.tests.utils as test_utils class TestImageTagsController(base.IsolatedUnitTest): def setUp(self): super(TestImageTagsController, self).setUp() self.db = unit_test_utils.FakeDB() self.controller = glance.api.v2.image_tags.Controller(self.db) def test_create_tag(self): request = unit_test_utils.get_fake_request() self.controller.update(request, unit_test_utils.UUID1, 'dink') context = request.context tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) def test_create_too_many_tags(self): self.config(image_tag_quota=0) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, unit_test_utils.UUID1, 'dink') def test_create_duplicate_tag_ignored(self): request = unit_test_utils.get_fake_request() self.controller.update(request, unit_test_utils.UUID1, 'dink') self.controller.update(request, unit_test_utils.UUID1, 'dink') context = request.context tags = self.db.image_tag_get_all(context, unit_test_utils.UUID1) self.assertEqual(1, len([tag for tag in tags if tag == 'dink'])) def test_update_tag_of_non_existing_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, "abcd", "dink") def test_delete_tag_forbidden(self): def fake_get(self): raise exception.Forbidden() image_repo = image_data_tests.FakeImageRepo() image_repo.get = fake_get def get_fake_repo(self): return image_repo self.controller.gateway.get_repo = get_fake_repo request = unit_test_utils.get_fake_request() 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, unit_test_utils.UUID1, "ping") def test_delete_tag(self): request = unit_test_utils.get_fake_request() self.controller.delete(request, unit_test_utils.UUID1, 'ping') def test_delete_tag_not_found(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, unit_test_utils.UUID1, 'what') def test_delete_tag_of_non_existing_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, "abcd", "dink") class TestImagesSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializer, self).setUp() self.serializer = glance.api.v2.image_tags.ResponseSerializer() def test_create_tag(self): response = webob.Response() self.serializer.update(response, None) self.assertEqual(204, response.status_int) def test_delete_tag(self): response = webob.Response() self.serializer.delete(response, None) self.assertEqual(204, response.status_int) glance-12.0.0/glance/tests/unit/v2/__init__.py0000664000567000056710000000000012701407047022166 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/v2/test_metadef_resources.py0000664000567000056710000023541312701407047025207 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import webob from glance.api.v2 import metadef_namespaces as namespaces from glance.api.v2 import metadef_objects as objects from glance.api.v2 import metadef_properties as properties from glance.api.v2 import metadef_resource_types as resource_types from glance.api.v2 import metadef_tags as tags from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' NAMESPACE1 = 'Namespace1' NAMESPACE2 = 'Namespace2' NAMESPACE3 = 'Namespace3' NAMESPACE4 = 'Namespace4' NAMESPACE5 = 'Namespace5' NAMESPACE6 = 'Namespace6' PROPERTY1 = 'Property1' PROPERTY2 = 'Property2' PROPERTY3 = 'Property3' PROPERTY4 = 'Property4' OBJECT1 = 'Object1' OBJECT2 = 'Object2' OBJECT3 = 'Object3' RESOURCE_TYPE1 = 'ResourceType1' RESOURCE_TYPE2 = 'ResourceType2' RESOURCE_TYPE3 = 'ResourceType3' RESOURCE_TYPE4 = 'ResourceType4' TAG1 = 'Tag1' TAG2 = 'Tag2' TAG3 = 'Tag3' TAG4 = 'Tag4' TAG5 = 'Tag5' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' PREFIX1 = 'pref' def _db_namespace_fixture(namespace, **kwargs): obj = { 'namespace': namespace, 'display_name': None, 'description': None, 'visibility': 'public', 'protected': False, 'owner': None, } obj.update(kwargs) return obj def _db_property_fixture(name, **kwargs): obj = { 'name': name, 'json_schema': {"type": "string", "title": "title"}, } obj.update(kwargs) return obj def _db_object_fixture(name, **kwargs): obj = { 'name': name, 'description': None, 'json_schema': {}, 'required': '[]', } obj.update(kwargs) return obj def _db_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'protected': False, } obj.update(kwargs) return obj def _db_tag_fixture(name, **kwargs): obj = { 'name': name } obj.update(kwargs) return obj def _db_tags_fixture(tag_names=None): tag_list = [] 
if not tag_names: tag_names = [TAG1, TAG2, TAG3] for tag_name in tag_names: tag = tags.MetadefTag() tag.name = tag_name tag_list.append(tag) return tag_list def _db_namespace_resource_type_fixture(name, **kwargs): obj = { 'name': name, 'properties_target': None, 'prefix': None, } obj.update(kwargs) return obj class TestMetadefsControllers(base.IsolatedUnitTest): def setUp(self): super(TestMetadefsControllers, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self._create_namespaces() self._create_properties() self._create_objects() self._create_resource_types() self._create_namespaces_resource_types() self._create_tags() self.namespace_controller = namespaces.NamespaceController( self.db, self.policy, self.notifier) self.property_controller = properties.NamespacePropertiesController( self.db, self.policy, self.notifier) self.object_controller = objects.MetadefObjectsController( self.db, self.policy, self.notifier) self.rt_controller = resource_types.ResourceTypeController( self.db, self.policy, self.notifier) self.tag_controller = tags.TagsController( self.db, self.policy, self.notifier) self.deserializer = objects.RequestDeserializer() def _create_namespaces(self): req = unit_test_utils.get_fake_request() self.namespaces = [ _db_namespace_fixture(NAMESPACE1, owner=TENANT1, visibility='private', protected=True), _db_namespace_fixture(NAMESPACE2, owner=TENANT2, visibility='private'), _db_namespace_fixture(NAMESPACE3, owner=TENANT3), _db_namespace_fixture(NAMESPACE5, owner=TENANT4), _db_namespace_fixture(NAMESPACE6, owner=TENANT4), ] [self.db.metadef_namespace_create(req.context, namespace) for namespace in self.namespaces] def _create_properties(self): req = unit_test_utils.get_fake_request() self.properties = [ (NAMESPACE3, _db_property_fixture(PROPERTY1)), (NAMESPACE3, _db_property_fixture(PROPERTY2)), (NAMESPACE1, _db_property_fixture(PROPERTY1)), 
(NAMESPACE6, _db_property_fixture(PROPERTY4)), ] [self.db.metadef_property_create(req.context, namespace, property) for namespace, property in self.properties] def _create_objects(self): req = unit_test_utils.get_fake_request() self.objects = [ (NAMESPACE3, _db_object_fixture(OBJECT1)), (NAMESPACE3, _db_object_fixture(OBJECT2)), (NAMESPACE1, _db_object_fixture(OBJECT1)), ] [self.db.metadef_object_create(req.context, namespace, object) for namespace, object in self.objects] def _create_resource_types(self): req = unit_test_utils.get_fake_request() self.resource_types = [ _db_resource_type_fixture(RESOURCE_TYPE1), _db_resource_type_fixture(RESOURCE_TYPE2), _db_resource_type_fixture(RESOURCE_TYPE4), ] [self.db.metadef_resource_type_create(req.context, resource_type) for resource_type in self.resource_types] def _create_tags(self): req = unit_test_utils.get_fake_request() self.tags = [ (NAMESPACE3, _db_tag_fixture(TAG1)), (NAMESPACE3, _db_tag_fixture(TAG2)), (NAMESPACE1, _db_tag_fixture(TAG1)), ] [self.db.metadef_tag_create(req.context, namespace, tag) for namespace, tag in self.tags] def _create_namespaces_resource_types(self): req = unit_test_utils.get_fake_request(is_admin=True) self.ns_resource_types = [ (NAMESPACE1, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE3, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE1)), (NAMESPACE2, _db_namespace_resource_type_fixture(RESOURCE_TYPE2)), (NAMESPACE6, _db_namespace_resource_type_fixture(RESOURCE_TYPE4, prefix=PREFIX1)), ] [self.db.metadef_resource_type_association_create(req.context, namespace, ns_resource_type) for namespace, ns_resource_type in self.ns_resource_types] def assertNotificationLog(self, expected_event_type, expected_payloads): events = [{'type': expected_event_type, 'payload': payload} for payload in expected_payloads] self.assertNotificationsLog(events) def assertNotificationsLog(self, expected_events): output_logs = 
self.notifier.get_logs() expected_logs_count = len(expected_events) self.assertEqual(expected_logs_count, len(output_logs)) for output_log, event in zip(output_logs, expected_events): self.assertEqual('INFO', output_log['notification_type']) self.assertEqual(event['type'], output_log['event_type']) self.assertDictContainsSubset(event['payload'], output_log['payload']) self.notifier.log = [] def test_namespace_index(self): request = unit_test_utils.get_fake_request() output = self.namespace_controller.index(request) output = output.to_dict() self.assertEqual(4, len(output['namespaces'])) actual = set([namespace.namespace for namespace in output['namespaces']]) expected = set([NAMESPACE1, NAMESPACE3, NAMESPACE5, NAMESPACE6]) self.assertEqual(expected, actual) def test_namespace_index_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.namespace_controller.index(request) output = output.to_dict() self.assertEqual(5, len(output['namespaces'])) actual = set([namespace.namespace for namespace in output['namespaces']]) expected = set([NAMESPACE1, NAMESPACE2, NAMESPACE3, NAMESPACE5, NAMESPACE6]) self.assertEqual(expected, actual) def test_namespace_index_visibility_public(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) filters = {'visibility': 'public'} output = self.namespace_controller.index(request, filters=filters) output = output.to_dict() self.assertEqual(3, len(output['namespaces'])) actual = set([namespace.namespace for namespace in output['namespaces']]) expected = set([NAMESPACE3, NAMESPACE5, NAMESPACE6]) self.assertEqual(expected, actual) def test_namespace_index_resource_type(self): request = unit_test_utils.get_fake_request() filters = {'resource_types': [RESOURCE_TYPE1]} output = self.namespace_controller.index(request, filters=filters) output = output.to_dict() self.assertEqual(2, len(output['namespaces'])) actual = set([namespace.namespace for namespace in output['namespaces']]) expected = set([NAMESPACE1, 
NAMESPACE3]) self.assertEqual(expected, actual) def test_namespace_show(self): request = unit_test_utils.get_fake_request() output = self.namespace_controller.show(request, NAMESPACE1) output = output.to_dict() self.assertEqual(NAMESPACE1, output['namespace']) self.assertEqual(TENANT1, output['owner']) self.assertTrue(output['protected']) self.assertEqual('private', output['visibility']) def test_namespace_show_with_related_resources(self): request = unit_test_utils.get_fake_request() output = self.namespace_controller.show(request, NAMESPACE3) output = output.to_dict() self.assertEqual(NAMESPACE3, output['namespace']) self.assertEqual(TENANT3, output['owner']) self.assertFalse(output['protected']) self.assertEqual('public', output['visibility']) self.assertEqual(2, len(output['properties'])) actual = set([property for property in output['properties']]) expected = set([PROPERTY1, PROPERTY2]) self.assertEqual(expected, actual) self.assertEqual(2, len(output['objects'])) actual = set([object.name for object in output['objects']]) expected = set([OBJECT1, OBJECT2]) self.assertEqual(expected, actual) self.assertEqual(1, len(output['resource_type_associations'])) actual = set([rt.name for rt in output['resource_type_associations']]) expected = set([RESOURCE_TYPE1]) self.assertEqual(expected, actual) def test_namespace_show_with_property_prefix(self): request = unit_test_utils.get_fake_request() rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE2 rt.prefix = 'pref' rt = self.rt_controller.create(request, rt, NAMESPACE3) object = objects.MetadefObject() object.name = OBJECT3 object.required = [] property = properties.PropertyType() property.name = PROPERTY2 property.type = 'string' property.title = 'title' object.properties = {'prop1': property} object = self.object_controller.create(request, object, NAMESPACE3) self.assertNotificationsLog([ { 'type': 'metadef_resource_type.create', 'payload': { 'namespace': NAMESPACE3, 'name': RESOURCE_TYPE2, 'prefix': 
'pref', 'properties_target': None, } }, { 'type': 'metadef_object.create', 'payload': { 'name': OBJECT3, 'namespace': NAMESPACE3, 'properties': [{ 'name': 'prop1', 'additionalItems': None, 'confidential': None, 'title': u'title', 'default': None, 'pattern': None, 'enum': None, 'maximum': None, 'minItems': None, 'minimum': None, 'maxItems': None, 'minLength': None, 'uniqueItems': None, 'maxLength': None, 'items': None, 'type': u'string', 'description': None }], 'required': [], 'description': None, } } ]) filters = {'resource_type': RESOURCE_TYPE2} output = self.namespace_controller.show(request, NAMESPACE3, filters) output = output.to_dict() [self.assertTrue(property_name.startswith(rt.prefix)) for property_name in output['properties'].keys()] for object in output['objects']: [self.assertTrue(property_name.startswith(rt.prefix)) for property_name in object.properties.keys()] def test_namespace_show_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, 'FakeName') def test_namespace_show_non_visible(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.namespace_controller.delete(request, NAMESPACE2) self.assertNotificationLog("metadef_namespace.delete", [{'namespace': NAMESPACE2}]) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete_notification_disabled(self): self.config(disabled_notifications=["metadef_namespace.delete"]) request = unit_test_utils.get_fake_request(tenant=TENANT2) self.namespace_controller.delete(request, NAMESPACE2) self.assertNotificationsLog([]) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete_notification_group_disabled(self): 
self.config(disabled_notifications=["metadef_namespace"]) request = unit_test_utils.get_fake_request(tenant=TENANT2) self.namespace_controller.delete(request, NAMESPACE2) self.assertNotificationsLog([]) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete_notification_create_disabled(self): self.config(disabled_notifications=["metadef_namespace.create"]) request = unit_test_utils.get_fake_request(tenant=TENANT2) self.namespace_controller.delete(request, NAMESPACE2) self.assertNotificationLog("metadef_namespace.delete", [{'namespace': NAMESPACE2}]) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.delete, request, 'FakeName') self.assertNotificationsLog([]) def test_namespace_delete_non_visible(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.delete, request, NAMESPACE2) self.assertNotificationsLog([]) def test_namespace_delete_non_visible_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.namespace_controller.delete(request, NAMESPACE2) self.assertNotificationLog("metadef_namespace.delete", [{'namespace': NAMESPACE2}]) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE2) def test_namespace_delete_protected(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.delete, request, NAMESPACE1) self.assertNotificationsLog([]) def test_namespace_delete_protected_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.delete, request, NAMESPACE1) self.assertNotificationsLog([]) def test_namespace_delete_with_contents(self): 
request = unit_test_utils.get_fake_request(tenant=TENANT3) self.namespace_controller.delete(request, NAMESPACE3) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE3) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE3, OBJECT1) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE3, OBJECT1) def test_namespace_delete_properties(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.namespace_controller.delete_properties(request, NAMESPACE3) output = self.property_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['properties'])) self.assertNotificationLog("metadef_namespace.delete_properties", [{'namespace': NAMESPACE3}]) def test_namespace_delete_properties_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.delete_properties, request, NAMESPACE3) self.assertNotificationsLog([]) def test_namespace_delete_properties_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.namespace_controller.delete_properties(request, NAMESPACE3) output = self.property_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['properties'])) self.assertNotificationLog("metadef_namespace.delete_properties", [{'namespace': NAMESPACE3}]) def test_namespace_non_existing_delete_properties(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.delete_properties, request, NAMESPACE4) self.assertNotificationsLog([]) def test_namespace_delete_objects(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.namespace_controller.delete_objects(request, NAMESPACE3) output = self.object_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['objects'])) 
self.assertNotificationLog("metadef_namespace.delete_objects", [{'namespace': NAMESPACE3}]) def test_namespace_delete_objects_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.delete_objects, request, NAMESPACE3) self.assertNotificationsLog([]) def test_namespace_delete_objects_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.namespace_controller.delete_objects(request, NAMESPACE3) output = self.object_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['objects'])) self.assertNotificationLog("metadef_namespace.delete_objects", [{'namespace': NAMESPACE3}]) def test_namespace_non_existing_delete_objects(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.delete_objects, request, NAMESPACE4) self.assertNotificationsLog([]) def test_namespace_delete_tags(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.namespace_controller.delete_tags(request, NAMESPACE3) output = self.tag_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['tags'])) self.assertNotificationLog("metadef_namespace.delete_tags", [{'namespace': NAMESPACE3}]) def test_namespace_delete_tags_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.delete_tags, request, NAMESPACE3) self.assertNotificationsLog([]) def test_namespace_delete_tags_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.namespace_controller.delete_tags(request, NAMESPACE3) output = self.tag_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(0, len(output['tags'])) self.assertNotificationLog("metadef_namespace.delete_tags", [{'namespace': NAMESPACE3}]) def test_namespace_non_existing_delete_tags(self): 
request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.delete_tags, request, NAMESPACE4) self.assertNotificationsLog([]) def test_namespace_create(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = NAMESPACE4 namespace = self.namespace_controller.create(request, namespace) self.assertEqual(NAMESPACE4, namespace.namespace) self.assertNotificationLog("metadef_namespace.create", [{'namespace': NAMESPACE4}]) namespace = self.namespace_controller.show(request, NAMESPACE4) self.assertEqual(NAMESPACE4, namespace.namespace) def test_namespace_create_duplicate(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = 'new-namespace' new_ns = self.namespace_controller.create(request, namespace) self.assertEqual('new-namespace', new_ns.namespace) self.assertRaises(webob.exc.HTTPConflict, self.namespace_controller.create, request, namespace) def test_namespace_create_different_owner(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = NAMESPACE4 namespace.owner = TENANT4 self.assertRaises(webob.exc.HTTPForbidden, self.namespace_controller.create, request, namespace) self.assertNotificationsLog([]) def test_namespace_create_different_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) namespace = namespaces.Namespace() namespace.namespace = NAMESPACE4 namespace.owner = TENANT4 namespace = self.namespace_controller.create(request, namespace) self.assertEqual(NAMESPACE4, namespace.namespace) self.assertNotificationLog("metadef_namespace.create", [{'namespace': NAMESPACE4}]) namespace = self.namespace_controller.show(request, NAMESPACE4) self.assertEqual(NAMESPACE4, namespace.namespace) def test_namespace_create_with_related_resources(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() 
namespace.namespace = NAMESPACE4 prop1 = properties.PropertyType() prop1.type = 'string' prop1.title = 'title' prop2 = properties.PropertyType() prop2.type = 'string' prop2.title = 'title' namespace.properties = {PROPERTY1: prop1, PROPERTY2: prop2} object1 = objects.MetadefObject() object1.name = OBJECT1 object1.required = [] object1.properties = {} object2 = objects.MetadefObject() object2.name = OBJECT2 object2.required = [] object2.properties = {} namespace.objects = [object1, object2] output = self.namespace_controller.create(request, namespace) self.assertEqual(NAMESPACE4, namespace.namespace) output = output.to_dict() self.assertEqual(2, len(output['properties'])) actual = set([property for property in output['properties']]) expected = set([PROPERTY1, PROPERTY2]) self.assertEqual(expected, actual) self.assertEqual(2, len(output['objects'])) actual = set([object.name for object in output['objects']]) expected = set([OBJECT1, OBJECT2]) self.assertEqual(expected, actual) output = self.namespace_controller.show(request, NAMESPACE4) self.assertEqual(NAMESPACE4, namespace.namespace) output = output.to_dict() self.assertEqual(2, len(output['properties'])) actual = set([property for property in output['properties']]) expected = set([PROPERTY1, PROPERTY2]) self.assertEqual(expected, actual) self.assertEqual(2, len(output['objects'])) actual = set([object.name for object in output['objects']]) expected = set([OBJECT1, OBJECT2]) self.assertEqual(expected, actual) self.assertNotificationsLog([ { 'type': 'metadef_namespace.create', 'payload': { 'namespace': NAMESPACE4, 'owner': TENANT1, } }, { 'type': 'metadef_object.create', 'payload': { 'namespace': NAMESPACE4, 'name': OBJECT1, 'properties': [], } }, { 'type': 'metadef_object.create', 'payload': { 'namespace': NAMESPACE4, 'name': OBJECT2, 'properties': [], } }, { 'type': 'metadef_property.create', 'payload': { 'namespace': NAMESPACE4, 'type': 'string', 'title': 'title', } }, { 'type': 'metadef_property.create', 
'payload': { 'namespace': NAMESPACE4, 'type': 'string', 'title': 'title', } } ]) def test_namespace_create_conflict(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = NAMESPACE1 self.assertRaises(webob.exc.HTTPConflict, self.namespace_controller.create, request, namespace) self.assertNotificationsLog([]) def test_namespace_update(self): request = unit_test_utils.get_fake_request() namespace = self.namespace_controller.show(request, NAMESPACE1) namespace.protected = False namespace = self.namespace_controller.update(request, namespace, NAMESPACE1) self.assertFalse(namespace.protected) self.assertNotificationLog("metadef_namespace.update", [ {'namespace': NAMESPACE1, 'protected': False} ]) namespace = self.namespace_controller.show(request, NAMESPACE1) self.assertFalse(namespace.protected) def test_namespace_update_non_existing(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = NAMESPACE4 self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.update, request, namespace, NAMESPACE4) self.assertNotificationsLog([]) def test_namespace_update_non_visible(self): request = unit_test_utils.get_fake_request() namespace = namespaces.Namespace() namespace.namespace = NAMESPACE2 self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.update, request, namespace, NAMESPACE2) self.assertNotificationsLog([]) def test_namespace_update_non_visible_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) namespace = self.namespace_controller.show(request, NAMESPACE2) namespace.protected = False namespace = self.namespace_controller.update(request, namespace, NAMESPACE2) self.assertFalse(namespace.protected) self.assertNotificationLog("metadef_namespace.update", [ {'namespace': NAMESPACE2, 'protected': False} ]) namespace = self.namespace_controller.show(request, NAMESPACE2) self.assertFalse(namespace.protected) def 
test_namespace_update_name(self): request = unit_test_utils.get_fake_request() namespace = self.namespace_controller.show(request, NAMESPACE1) namespace.namespace = NAMESPACE4 namespace = self.namespace_controller.update(request, namespace, NAMESPACE1) self.assertEqual(NAMESPACE4, namespace.namespace) self.assertNotificationLog("metadef_namespace.update", [ {'namespace': NAMESPACE4, 'namespace_old': NAMESPACE1} ]) namespace = self.namespace_controller.show(request, NAMESPACE4) self.assertEqual(NAMESPACE4, namespace.namespace) self.assertRaises(webob.exc.HTTPNotFound, self.namespace_controller.show, request, NAMESPACE1) def test_namespace_update_name_conflict(self): request = unit_test_utils.get_fake_request() namespace = self.namespace_controller.show(request, NAMESPACE1) namespace.namespace = NAMESPACE2 self.assertRaises(webob.exc.HTTPConflict, self.namespace_controller.update, request, namespace, NAMESPACE1) self.assertNotificationsLog([]) def test_property_index(self): request = unit_test_utils.get_fake_request() output = self.property_controller.index(request, NAMESPACE3) self.assertEqual(2, len(output.properties)) actual = set([property for property in output.properties]) expected = set([PROPERTY1, PROPERTY2]) self.assertEqual(expected, actual) def test_property_index_empty(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) output = self.property_controller.index(request, NAMESPACE2) self.assertEqual(0, len(output.properties)) def test_property_index_non_existing_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.index, request, NAMESPACE4) def test_property_show(self): request = unit_test_utils.get_fake_request() output = self.property_controller.show(request, NAMESPACE3, PROPERTY1) self.assertEqual(PROPERTY1, output.name) def test_property_show_specific_resource_type(self): request = unit_test_utils.get_fake_request() output = 
self.property_controller.show( request, NAMESPACE6, ''.join([PREFIX1, PROPERTY4]), filters={'resource_type': RESOURCE_TYPE4}) self.assertEqual(PROPERTY4, output.name) def test_property_show_prefix_mismatch(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE6, PROPERTY4, filters={'resource_type': RESOURCE_TYPE4}) def test_property_show_non_existing_resource_type(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE2, PROPERTY1, filters={'resource_type': 'test'}) def test_property_show_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE2, PROPERTY1) def test_property_show_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE1, PROPERTY1) def test_property_show_non_visible_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) output = self.property_controller.show(request, NAMESPACE1, PROPERTY1) self.assertEqual(PROPERTY1, output.name) def test_property_delete(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.property_controller.delete(request, NAMESPACE3, PROPERTY1) self.assertNotificationLog("metadef_property.delete", [{'name': PROPERTY1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE3, PROPERTY1) def test_property_delete_disabled_notification(self): self.config(disabled_notifications=["metadef_property.delete"]) request = unit_test_utils.get_fake_request(tenant=TENANT3) self.property_controller.delete(request, NAMESPACE3, PROPERTY1) self.assertNotificationsLog([]) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, 
NAMESPACE3, PROPERTY1) def test_property_delete_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.property_controller.delete, request, NAMESPACE3, PROPERTY1) self.assertNotificationsLog([]) def test_property_delete_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.property_controller.delete(request, NAMESPACE3, PROPERTY1) self.assertNotificationLog("metadef_property.delete", [{'name': PROPERTY1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.show, request, NAMESPACE3, PROPERTY1) def test_property_delete_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.delete, request, NAMESPACE5, PROPERTY2) self.assertNotificationsLog([]) def test_property_delete_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.delete, request, NAMESPACE4, PROPERTY1) self.assertNotificationsLog([]) def test_property_delete_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.delete, request, NAMESPACE1, PROPERTY1) self.assertNotificationsLog([]) def test_property_delete_admin_protected(self): request = unit_test_utils.get_fake_request(is_admin=True) self.assertRaises(webob.exc.HTTPForbidden, self.property_controller.delete, request, NAMESPACE1, PROPERTY1) self.assertNotificationsLog([]) def test_property_create(self): request = unit_test_utils.get_fake_request() property = properties.PropertyType() property.name = PROPERTY2 property.type = 'string' property.title = 'title' property = self.property_controller.create(request, NAMESPACE1, property) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) 
self.assertNotificationLog("metadef_property.create", [{'name': PROPERTY2, 'namespace': NAMESPACE1}]) property = self.property_controller.show(request, NAMESPACE1, PROPERTY2) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) def test_property_create_with_operators(self): request = unit_test_utils.get_fake_request() property = properties.PropertyType() property.name = PROPERTY2 property.type = 'string' property.title = 'title' property.operators = [''] property = self.property_controller.create(request, NAMESPACE1, property) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) self.assertEqual([''], property.operators) property = self.property_controller.show(request, NAMESPACE1, PROPERTY2) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) self.assertEqual([''], property.operators) def test_property_create_conflict(self): request = unit_test_utils.get_fake_request() property = properties.PropertyType() property.name = PROPERTY1 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPConflict, self.property_controller.create, request, NAMESPACE1, property) self.assertNotificationsLog([]) def test_property_create_non_visible_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) property = properties.PropertyType() property.name = PROPERTY1 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPForbidden, self.property_controller.create, request, NAMESPACE1, property) self.assertNotificationsLog([]) def test_property_create_non_visible_namespace_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) property = properties.PropertyType() property.name = PROPERTY2 property.type = 'string' property.title = 'title' property = 
self.property_controller.create(request, NAMESPACE1, property) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) self.assertNotificationLog("metadef_property.create", [{'name': PROPERTY2, 'namespace': NAMESPACE1}]) property = self.property_controller.show(request, NAMESPACE1, PROPERTY2) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) def test_property_create_non_existing_namespace(self): request = unit_test_utils.get_fake_request() property = properties.PropertyType() property.name = PROPERTY1 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.create, request, NAMESPACE4, property) self.assertNotificationsLog([]) def test_property_create_duplicate(self): request = unit_test_utils.get_fake_request() property = properties.PropertyType() property.name = 'new-property' property.type = 'string' property.title = 'title' new_property = self.property_controller.create(request, NAMESPACE1, property) self.assertEqual('new-property', new_property.name) self.assertRaises(webob.exc.HTTPConflict, self.property_controller.create, request, NAMESPACE1, property) def test_property_update(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) property = self.property_controller.show(request, NAMESPACE3, PROPERTY1) property.name = PROPERTY1 property.type = 'string123' property.title = 'title123' property = self.property_controller.update(request, NAMESPACE3, PROPERTY1, property) self.assertEqual(PROPERTY1, property.name) self.assertEqual('string123', property.type) self.assertEqual('title123', property.title) self.assertNotificationLog("metadef_property.update", [ { 'name': PROPERTY1, 'namespace': NAMESPACE3, 'type': 'string123', 'title': 'title123', } ]) property = self.property_controller.show(request, NAMESPACE3, PROPERTY1) self.assertEqual(PROPERTY1, 
property.name) self.assertEqual('string123', property.type) self.assertEqual('title123', property.title) def test_property_update_name(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) property = self.property_controller.show(request, NAMESPACE3, PROPERTY1) property.name = PROPERTY3 property.type = 'string' property.title = 'title' property = self.property_controller.update(request, NAMESPACE3, PROPERTY1, property) self.assertEqual(PROPERTY3, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) self.assertNotificationLog("metadef_property.update", [ { 'name': PROPERTY3, 'name_old': PROPERTY1, 'namespace': NAMESPACE3, 'type': 'string', 'title': 'title', } ]) property = self.property_controller.show(request, NAMESPACE3, PROPERTY2) self.assertEqual(PROPERTY2, property.name) self.assertEqual('string', property.type) self.assertEqual('title', property.title) def test_property_update_conflict(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) property = self.property_controller.show(request, NAMESPACE3, PROPERTY1) property.name = PROPERTY2 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPConflict, self.property_controller.update, request, NAMESPACE3, PROPERTY1, property) self.assertNotificationsLog([]) def test_property_update_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) property = properties.PropertyType() property.name = PROPERTY1 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.update, request, NAMESPACE5, PROPERTY1, property) self.assertNotificationsLog([]) def test_property_update_namespace_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) property = properties.PropertyType() property.name = PROPERTY1 property.type = 'string' property.title = 'title' self.assertRaises(webob.exc.HTTPNotFound, self.property_controller.update, 
request, NAMESPACE4, PROPERTY1, property) self.assertNotificationsLog([]) def test_object_index(self): request = unit_test_utils.get_fake_request() output = self.object_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(2, len(output['objects'])) actual = set([object.name for object in output['objects']]) expected = set([OBJECT1, OBJECT2]) self.assertEqual(expected, actual) def test_object_index_zero_limit(self): request = unit_test_utils.get_fake_request('/metadefs/namespaces/' 'Namespace3/' 'objects?limit=0') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_object_index_empty(self): request = unit_test_utils.get_fake_request() output = self.object_controller.index(request, NAMESPACE5) output = output.to_dict() self.assertEqual(0, len(output['objects'])) def test_object_index_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.index, request, NAMESPACE4) def test_object_show(self): request = unit_test_utils.get_fake_request() output = self.object_controller.show(request, NAMESPACE3, OBJECT1) self.assertEqual(OBJECT1, output.name) def test_object_show_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE5, OBJECT1) def test_object_show_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE1, OBJECT1) def test_object_show_non_visible_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) output = self.object_controller.show(request, NAMESPACE1, OBJECT1) self.assertEqual(OBJECT1, output.name) def test_object_delete(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.object_controller.delete(request, NAMESPACE3, OBJECT1) 
self.assertNotificationLog("metadef_object.delete", [{'name': OBJECT1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE3, OBJECT1) def test_object_delete_disabled_notification(self): self.config(disabled_notifications=["metadef_object.delete"]) request = unit_test_utils.get_fake_request(tenant=TENANT3) self.object_controller.delete(request, NAMESPACE3, OBJECT1) self.assertNotificationsLog([]) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE3, OBJECT1) def test_object_delete_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.object_controller.delete, request, NAMESPACE3, OBJECT1) self.assertNotificationsLog([]) def test_object_delete_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.object_controller.delete(request, NAMESPACE3, OBJECT1) self.assertNotificationLog("metadef_object.delete", [{'name': OBJECT1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.show, request, NAMESPACE3, OBJECT1) def test_object_delete_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.delete, request, NAMESPACE5, OBJECT1) self.assertNotificationsLog([]) def test_object_delete_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.delete, request, NAMESPACE4, OBJECT1) self.assertNotificationsLog([]) def test_object_delete_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.delete, request, NAMESPACE1, OBJECT1) self.assertNotificationsLog([]) def test_object_delete_admin_protected(self): request = unit_test_utils.get_fake_request(is_admin=True) self.assertRaises(webob.exc.HTTPForbidden, 
self.object_controller.delete, request, NAMESPACE1, OBJECT1) self.assertNotificationsLog([]) def test_object_create(self): request = unit_test_utils.get_fake_request() object = objects.MetadefObject() object.name = OBJECT2 object.required = [] object.properties = {} object = self.object_controller.create(request, object, NAMESPACE1) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertEqual({}, object.properties) self.assertNotificationLog("metadef_object.create", [{'name': OBJECT2, 'namespace': NAMESPACE1, 'properties': []}]) object = self.object_controller.show(request, NAMESPACE1, OBJECT2) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertEqual({}, object.properties) def test_object_create_duplicate(self): request = unit_test_utils.get_fake_request() object = objects.MetadefObject() object.name = 'New-Object' object.required = [] object.properties = {} new_obj = self.object_controller.create(request, object, NAMESPACE3) self.assertEqual('New-Object', new_obj.name) self.assertRaises(webob.exc.HTTPConflict, self.object_controller.create, request, object, NAMESPACE3) def test_object_create_conflict(self): request = unit_test_utils.get_fake_request() object = objects.MetadefObject() object.name = OBJECT1 object.required = [] object.properties = {} self.assertRaises(webob.exc.HTTPConflict, self.object_controller.create, request, object, NAMESPACE1) self.assertNotificationsLog([]) def test_object_create_non_existing_namespace(self): request = unit_test_utils.get_fake_request() object = objects.MetadefObject() object.name = PROPERTY1 object.required = [] object.properties = {} self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.create, request, object, NAMESPACE4) self.assertNotificationsLog([]) def test_object_create_non_visible_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) object = objects.MetadefObject() object.name = OBJECT1 object.required = [] 
object.properties = {} self.assertRaises(webob.exc.HTTPForbidden, self.object_controller.create, request, object, NAMESPACE1) self.assertNotificationsLog([]) def test_object_create_non_visible_namespace_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) object = objects.MetadefObject() object.name = OBJECT2 object.required = [] object.properties = {} object = self.object_controller.create(request, object, NAMESPACE1) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertEqual({}, object.properties) self.assertNotificationLog("metadef_object.create", [{'name': OBJECT2, 'namespace': NAMESPACE1}]) object = self.object_controller.show(request, NAMESPACE1, OBJECT2) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertEqual({}, object.properties) def test_object_create_missing_properties(self): request = unit_test_utils.get_fake_request() object = objects.MetadefObject() object.name = OBJECT2 object.required = [] object = self.object_controller.create(request, object, NAMESPACE1) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertNotificationLog("metadef_object.create", [{'name': OBJECT2, 'namespace': NAMESPACE1, 'properties': []}]) object = self.object_controller.show(request, NAMESPACE1, OBJECT2) self.assertEqual(OBJECT2, object.name) self.assertEqual([], object.required) self.assertEqual({}, object.properties) def test_object_update(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) object = self.object_controller.show(request, NAMESPACE3, OBJECT1) object.name = OBJECT1 object.description = 'description' object = self.object_controller.update(request, object, NAMESPACE3, OBJECT1) self.assertEqual(OBJECT1, object.name) self.assertEqual('description', object.description) self.assertNotificationLog("metadef_object.update", [ { 'name': OBJECT1, 'namespace': NAMESPACE3, 'description': 'description', } ]) property = 
self.object_controller.show(request, NAMESPACE3, OBJECT1) self.assertEqual(OBJECT1, property.name) self.assertEqual('description', object.description) def test_object_update_name(self): request = unit_test_utils.get_fake_request() object = self.object_controller.show(request, NAMESPACE1, OBJECT1) object.name = OBJECT2 object = self.object_controller.update(request, object, NAMESPACE1, OBJECT1) self.assertEqual(OBJECT2, object.name) self.assertNotificationLog("metadef_object.update", [ { 'name': OBJECT2, 'name_old': OBJECT1, 'namespace': NAMESPACE1, } ]) object = self.object_controller.show(request, NAMESPACE1, OBJECT2) self.assertEqual(OBJECT2, object.name) def test_object_update_conflict(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) object = self.object_controller.show(request, NAMESPACE3, OBJECT1) object.name = OBJECT2 self.assertRaises(webob.exc.HTTPConflict, self.object_controller.update, request, object, NAMESPACE3, OBJECT1) self.assertNotificationsLog([]) def test_object_update_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) object = objects.MetadefObject() object.name = OBJECT1 object.required = [] object.properties = {} self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.update, request, object, NAMESPACE5, OBJECT1) self.assertNotificationsLog([]) def test_object_update_namespace_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) object = objects.MetadefObject() object.name = OBJECT1 object.required = [] object.properties = {} self.assertRaises(webob.exc.HTTPNotFound, self.object_controller.update, request, object, NAMESPACE4, OBJECT1) self.assertNotificationsLog([]) def test_resource_type_index(self): request = unit_test_utils.get_fake_request() output = self.rt_controller.index(request) self.assertEqual(3, len(output.resource_types)) actual = set([rtype.name for rtype in output.resource_types]) expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2, RESOURCE_TYPE4]) 
self.assertEqual(expected, actual) def test_resource_type_show(self): request = unit_test_utils.get_fake_request() output = self.rt_controller.show(request, NAMESPACE3) self.assertEqual(1, len(output.resource_type_associations)) actual = set([rt.name for rt in output.resource_type_associations]) expected = set([RESOURCE_TYPE1]) self.assertEqual(expected, actual) def test_resource_type_show_empty(self): request = unit_test_utils.get_fake_request() output = self.rt_controller.show(request, NAMESPACE5) self.assertEqual(0, len(output.resource_type_associations)) def test_resource_type_show_non_visible(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show, request, NAMESPACE2) def test_resource_type_show_non_visible_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) output = self.rt_controller.show(request, NAMESPACE2) self.assertEqual(2, len(output.resource_type_associations)) actual = set([rt.name for rt in output.resource_type_associations]) expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) self.assertEqual(expected, actual) def test_resource_type_show_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.show, request, NAMESPACE4) def test_resource_type_association_delete(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) self.assertNotificationLog("metadef_resource_type.delete", [{'name': RESOURCE_TYPE1, 'namespace': NAMESPACE3}]) output = self.rt_controller.show(request, NAMESPACE3) self.assertEqual(0, len(output.resource_type_associations)) def test_resource_type_association_delete_disabled_notification(self): self.config(disabled_notifications=["metadef_resource_type.delete"]) request = unit_test_utils.get_fake_request(tenant=TENANT3) self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) 
self.assertNotificationsLog([]) output = self.rt_controller.show(request, NAMESPACE3) self.assertEqual(0, len(output.resource_type_associations)) def test_resource_type_association_delete_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete, request, NAMESPACE3, RESOURCE_TYPE1) self.assertNotificationsLog([]) def test_resource_type_association_delete_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.rt_controller.delete(request, NAMESPACE3, RESOURCE_TYPE1) self.assertNotificationLog("metadef_resource_type.delete", [{'name': RESOURCE_TYPE1, 'namespace': NAMESPACE3}]) output = self.rt_controller.show(request, NAMESPACE3) self.assertEqual(0, len(output.resource_type_associations)) def test_resource_type_association_delete_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, request, NAMESPACE1, RESOURCE_TYPE2) self.assertNotificationsLog([]) def test_resource_type_association_delete_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, request, NAMESPACE4, RESOURCE_TYPE1) self.assertNotificationsLog([]) def test_resource_type_association_delete_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.delete, request, NAMESPACE1, RESOURCE_TYPE1) self.assertNotificationsLog([]) def test_resource_type_association_delete_protected_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.delete, request, NAMESPACE1, RESOURCE_TYPE1) self.assertNotificationsLog([]) def test_resource_type_association_create(self): request = unit_test_utils.get_fake_request() rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE2 rt.prefix 
= 'pref' rt = self.rt_controller.create(request, rt, NAMESPACE1) self.assertEqual(RESOURCE_TYPE2, rt.name) self.assertEqual('pref', rt.prefix) self.assertNotificationLog("metadef_resource_type.create", [{'name': RESOURCE_TYPE2, 'namespace': NAMESPACE1}]) output = self.rt_controller.show(request, NAMESPACE1) self.assertEqual(2, len(output.resource_type_associations)) actual = set([x.name for x in output.resource_type_associations]) expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) self.assertEqual(expected, actual) def test_resource_type_association_create_conflict(self): request = unit_test_utils.get_fake_request() rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE1 rt.prefix = 'pref' self.assertRaises(webob.exc.HTTPConflict, self.rt_controller.create, request, rt, NAMESPACE1) self.assertNotificationsLog([]) def test_resource_type_association_create_non_existing_namespace(self): request = unit_test_utils.get_fake_request() rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE1 rt.prefix = 'pref' self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.create, request, rt, NAMESPACE4) self.assertNotificationsLog([]) def test_resource_type_association_create_non_existing_resource_type(self): request = unit_test_utils.get_fake_request() rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE3 rt.prefix = 'pref' self.assertRaises(webob.exc.HTTPNotFound, self.rt_controller.create, request, rt, NAMESPACE1) self.assertNotificationsLog([]) def test_resource_type_association_create_non_visible_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE2 rt.prefix = 'pref' self.assertRaises(webob.exc.HTTPForbidden, self.rt_controller.create, request, rt, NAMESPACE1) self.assertNotificationsLog([]) def test_resource_type_association_create_non_visible_namesp_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, 
is_admin=True) rt = resource_types.ResourceTypeAssociation() rt.name = RESOURCE_TYPE2 rt.prefix = 'pref' rt = self.rt_controller.create(request, rt, NAMESPACE1) self.assertEqual(RESOURCE_TYPE2, rt.name) self.assertEqual('pref', rt.prefix) self.assertNotificationLog("metadef_resource_type.create", [{'name': RESOURCE_TYPE2, 'namespace': NAMESPACE1}]) output = self.rt_controller.show(request, NAMESPACE1) self.assertEqual(2, len(output.resource_type_associations)) actual = set([x.name for x in output.resource_type_associations]) expected = set([RESOURCE_TYPE1, RESOURCE_TYPE2]) self.assertEqual(expected, actual) def test_tag_index(self): request = unit_test_utils.get_fake_request() output = self.tag_controller.index(request, NAMESPACE3) output = output.to_dict() self.assertEqual(2, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2]) self.assertEqual(expected, actual) def test_tag_index_empty(self): request = unit_test_utils.get_fake_request() output = self.tag_controller.index(request, NAMESPACE5) output = output.to_dict() self.assertEqual(0, len(output['tags'])) def test_tag_index_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.index, request, NAMESPACE4) def test_tag_show(self): request = unit_test_utils.get_fake_request() output = self.tag_controller.show(request, NAMESPACE3, TAG1) self.assertEqual(TAG1, output.name) def test_tag_show_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, request, NAMESPACE5, TAG1) def test_tag_show_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, request, NAMESPACE1, TAG1) def test_tag_show_non_visible_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) output = 
self.tag_controller.show(request, NAMESPACE1, TAG1) self.assertEqual(TAG1, output.name) def test_tag_delete(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) self.tag_controller.delete(request, NAMESPACE3, TAG1) self.assertNotificationLog("metadef_tag.delete", [{'name': TAG1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, request, NAMESPACE3, TAG1) def test_tag_delete_disabled_notification(self): self.config(disabled_notifications=["metadef_tag.delete"]) request = unit_test_utils.get_fake_request(tenant=TENANT3) self.tag_controller.delete(request, NAMESPACE3, TAG1) self.assertNotificationsLog([]) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, request, NAMESPACE3, TAG1) def test_tag_delete_other_owner(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.tag_controller.delete, request, NAMESPACE3, TAG1) self.assertNotificationsLog([]) def test_tag_delete_other_owner_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.tag_controller.delete(request, NAMESPACE3, TAG1) self.assertNotificationLog("metadef_tag.delete", [{'name': TAG1, 'namespace': NAMESPACE3}]) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.show, request, NAMESPACE3, TAG1) def test_tag_delete_non_existing(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.delete, request, NAMESPACE5, TAG1) self.assertNotificationsLog([]) def test_tag_delete_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.delete, request, NAMESPACE4, TAG1) self.assertNotificationsLog([]) def test_tag_delete_non_visible(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.delete, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def 
test_tag_delete_admin_protected(self): request = unit_test_utils.get_fake_request(is_admin=True) self.assertRaises(webob.exc.HTTPForbidden, self.tag_controller.delete, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def test_tag_create(self): request = unit_test_utils.get_fake_request() tag = self.tag_controller.create(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) self.assertNotificationLog("metadef_tag.create", [{'name': TAG2, 'namespace': NAMESPACE1}]) tag = self.tag_controller.show(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) def test_tag_create_tags(self): request = unit_test_utils.get_fake_request() metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture() output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( "metadef_tag.create", [ {'name': TAG1, 'namespace': NAMESPACE1}, {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) def test_tag_create_duplicate_tags(self): request = unit_test_utils.get_fake_request() metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) self.assertRaises( webob.exc.HTTPConflict, self.tag_controller.create_tags, request, metadef_tags, NAMESPACE1) self.assertNotificationsLog([]) def test_tag_create_duplicate_with_pre_existing_tags(self): request = unit_test_utils.get_fake_request() metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG1, TAG2, TAG3]) output = self.tag_controller.create_tags( request, metadef_tags, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) self.assertNotificationLog( 
"metadef_tag.create", [ {'name': TAG1, 'namespace': NAMESPACE1}, {'name': TAG2, 'namespace': NAMESPACE1}, {'name': TAG3, 'namespace': NAMESPACE1}, ] ) metadef_tags = tags.MetadefTags() metadef_tags.tags = _db_tags_fixture([TAG4, TAG5, TAG4]) self.assertRaises( webob.exc.HTTPConflict, self.tag_controller.create_tags, request, metadef_tags, NAMESPACE1) self.assertNotificationsLog([]) output = self.tag_controller.index(request, NAMESPACE1) output = output.to_dict() self.assertEqual(3, len(output['tags'])) actual = set([tag.name for tag in output['tags']]) expected = set([TAG1, TAG2, TAG3]) self.assertEqual(expected, actual) def test_tag_create_conflict(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.tag_controller.create, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_existing_namespace(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.create, request, NAMESPACE4, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_visible_namespace(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPForbidden, self.tag_controller.create, request, NAMESPACE1, TAG1) self.assertNotificationsLog([]) def test_tag_create_non_visible_namespace_admin(self): request = unit_test_utils.get_fake_request(tenant=TENANT2, is_admin=True) tag = self.tag_controller.create(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) self.assertNotificationLog("metadef_tag.create", [{'name': TAG2, 'namespace': NAMESPACE1}]) tag = self.tag_controller.show(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) def test_tag_update(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) tag = self.tag_controller.show(request, NAMESPACE3, TAG1) tag.name = TAG3 tag = self.tag_controller.update(request, tag, NAMESPACE3, TAG1) self.assertEqual(TAG3, tag.name) 
self.assertNotificationLog("metadef_tag.update", [ {'name': TAG3, 'namespace': NAMESPACE3} ]) property = self.tag_controller.show(request, NAMESPACE3, TAG3) self.assertEqual(TAG3, property.name) def test_tag_update_name(self): request = unit_test_utils.get_fake_request() tag = self.tag_controller.show(request, NAMESPACE1, TAG1) tag.name = TAG2 tag = self.tag_controller.update(request, tag, NAMESPACE1, TAG1) self.assertEqual(TAG2, tag.name) self.assertNotificationLog("metadef_tag.update", [ {'name': TAG2, 'name_old': TAG1, 'namespace': NAMESPACE1} ]) tag = self.tag_controller.show(request, NAMESPACE1, TAG2) self.assertEqual(TAG2, tag.name) def test_tag_update_conflict(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) tag = self.tag_controller.show(request, NAMESPACE3, TAG1) tag.name = TAG2 self.assertRaises(webob.exc.HTTPConflict, self.tag_controller.update, request, tag, NAMESPACE3, TAG1) self.assertNotificationsLog([]) def test_tag_update_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) tag = tags.MetadefTag() tag.name = TAG1 self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.update, request, tag, NAMESPACE5, TAG1) self.assertNotificationsLog([]) def test_tag_update_namespace_non_existing(self): request = unit_test_utils.get_fake_request(tenant=TENANT3) tag = tags.MetadefTag() tag.name = TAG1 self.assertRaises(webob.exc.HTTPNotFound, self.tag_controller.update, request, tag, NAMESPACE4, TAG1) self.assertNotificationsLog([]) glance-12.0.0/glance/tests/unit/v2/test_image_members_resource.py0000664000567000056710000005552212701407047026214 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import glance_store from oslo_config import cfg from oslo_serialization import jsonutils import webob import glance.api.v2.image_members import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' CONF = cfg.CONF BASE_URI = unit_test_utils.BASE_URI UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' UUID5 = '3eee7cc2-eae7-4c0f-b50d-a7160b0c62ed' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'is_public': False, 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, 'status': 'pending', } obj.update(kwargs) return obj def _domain_fixture(id, **kwargs): properties = { 'id': id, } properties.update(kwargs) return glance.domain.ImageMembership(**properties) class TestImageMembersController(test_utils.BaseTestCase): def setUp(self): super(TestImageMembersController, 
self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.store = unit_test_utils.FakeStoreAPI() self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self._create_images() self._create_image_members() self.controller = glance.api.v2.image_members.ImageMembersController( self.db, self.policy, self.notifier, self.store) glance_store.register_opts(CONF) self.config(default_store='filesystem', filesystem_store_datadir=self.test_dir, group="glance_store") glance_store.create_stores() def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, name='1', size=256, is_public=True, locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}]), _db_fixture(UUID2, owner=TENANT1, name='2', size=512), _db_fixture(UUID3, owner=TENANT3, name='3', size=512), _db_fixture(UUID4, owner=TENANT4, name='4', size=1024), _db_fixture(UUID5, owner=TENANT1, name='5', size=1024), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID2, TENANT4), _db_image_member_fixture(UUID3, TENANT4), _db_image_member_fixture(UUID3, TENANT2), _db_image_member_fixture(UUID4, TENANT1), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_index(self): request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_index_no_members(self): request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID5) self.assertEqual(0, len(output['members'])) self.assertEqual({'members': []}, output) def test_index_member_view(self): # UUID3 is a private image owned by TENANT3 # UUID3 has 
members TENANT2 and TENANT4 # When TENANT4 lists members for UUID3, should not see TENANT2 request = unit_test_utils.get_fake_request(tenant=TENANT4) output = self.controller.index(request, UUID3) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_index_private_image(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.index, request, UUID5) def test_index_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request, UUID1) def test_index_private_image_visible_members_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, UUID4) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT1]) self.assertEqual(expected, actual) def test_index_allowed_by_get_members_policy(self): rules = {"get_members": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) def test_index_forbidden_by_get_members_policy(self): rules = {"get_members": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request, image_id=UUID2) def test_show(self): request = unit_test_utils.get_fake_request(tenant=TENANT1) output = self.controller.show(request, UUID2, TENANT4) expected = self.image_members[0] self.assertEqual(expected['image_id'], output.image_id) self.assertEqual(expected['member'], output.member_id) self.assertEqual(expected['status'], output.status) def test_show_by_member(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) output = 
self.controller.show(request, UUID2, TENANT4) expected = self.image_members[0] self.assertEqual(expected['image_id'], output.image_id) self.assertEqual(expected['member'], output.member_id) self.assertEqual(expected['status'], output.status) def test_show_forbidden(self): request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID2, TENANT4) def test_show_not_found(self): # one member should not be able to view status of another member # of the same image request = unit_test_utils.get_fake_request(tenant=TENANT2) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID3, TENANT4) def test_create(self): request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_create_allowed_by_add_policy(self): rules = {"add_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() output = self.controller.create(request, image_id=UUID2, member_id=TENANT3) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_create_forbidden_by_add_policy(self): rules = {"add_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image_id=UUID2, member_id=TENANT3) def test_create_duplicate_member(self): request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) self.assertRaises(webob.exc.HTTPConflict, self.controller.create, request, image_id=image_id, member_id=member_id) def test_create_overlimit(self): self.config(image_member_quota=0) 
request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image_id=image_id, member_id=member_id) def test_create_unlimited(self): self.config(image_member_quota=-1) request = unit_test_utils.get_fake_request() image_id = UUID2 member_id = TENANT3 output = self.controller.create(request, image_id=image_id, member_id=member_id) self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT3, output.member_id) def test_update_done_by_member(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) image_id = UUID2 member_id = TENANT4 output = self.controller.update(request, image_id=image_id, member_id=member_id, status='accepted') self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT4, output.member_id) self.assertEqual('accepted', output.status) def test_update_done_by_member_forbidden_by_policy(self): rules = {"modify_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT4) self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image_id=UUID2, member_id=TENANT4, status='accepted') def test_update_done_by_member_allowed_by_policy(self): rules = {"modify_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT4) output = self.controller.update(request, image_id=UUID2, member_id=TENANT4, status='accepted') self.assertEqual(UUID2, output.image_id) self.assertEqual(TENANT4, output.member_id) self.assertEqual('accepted', output.status) def test_update_done_by_owner(self): request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID2, TENANT4, status='accepted') def test_update_non_existent_image(self): request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, '123', TENANT4, 
status='accepted') def test_update_invalid_status(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID2, TENANT4, status='accept') def test_create_private_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, UUID4, TENANT2) def test_create_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, UUID1, TENANT2) def test_create_image_does_not_exist(self): request = unit_test_utils.get_fake_request() image_id = 'fake-image-id' member_id = TENANT3 self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, request, image_id=image_id, member_id=member_id) def test_delete(self): request = unit_test_utils.get_fake_request() member_id = TENANT4 image_id = UUID2 res = self.controller.delete(request, image_id, member_id) self.assertEqual(b'', res.body) self.assertEqual(204, res.status_code) found_member = self.db.image_member_find( request.context, image_id=image_id, member=member_id) self.assertEqual([], found_member) def test_delete_by_member(self): request = unit_test_utils.get_fake_request(tenant=TENANT4) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) self.assertEqual(1, len(output['members'])) actual = set([image_member.member_id for image_member in output['members']]) expected = set([TENANT4]) self.assertEqual(expected, actual) def test_delete_allowed_by_policies(self): rules = {"get_member": True, "delete_member": True} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) output = self.controller.delete(request, image_id=UUID2, member_id=TENANT4) request = unit_test_utils.get_fake_request() output = self.controller.index(request, UUID2) 
self.assertEqual(0, len(output['members'])) def test_delete_forbidden_by_get_member_policy(self): rules = {"get_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) def test_delete_forbidden_by_delete_member_policy(self): rules = {"delete_member": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request(tenant=TENANT1) self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID2, TENANT4) def test_delete_private_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID4, TENANT1) def test_delete_public_image(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID1, TENANT1) def test_delete_image_does_not_exist(self): request = unit_test_utils.get_fake_request() member_id = TENANT2 image_id = 'fake-image-id' self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, image_id, member_id) def test_delete_member_does_not_exist(self): request = unit_test_utils.get_fake_request() member_id = 'fake-member-id' image_id = UUID2 found_member = self.db.image_member_find( request.context, image_id=image_id, member=member_id) self.assertEqual([], found_member) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, image_id, member_id) class TestImageMembersSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImageMembersSerializer, self).setUp() self.serializer = glance.api.v2.image_members.ResponseSerializer() self.fixtures = [ _domain_fixture(id='1', image_id=UUID2, member_id=TENANT1, status='accepted', created_at=DATETIME, updated_at=DATETIME), _domain_fixture(id='2', image_id=UUID2, member_id=TENANT2, status='pending', created_at=DATETIME, updated_at=DATETIME), ] def test_index(self): 
expected = { 'members': [ { 'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', }, { 'image_id': UUID2, 'member_id': TENANT2, 'status': 'pending', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', }, ], 'schema': '/v2/schemas/members', } request = webob.Request.blank('/v2/images/%s/members' % UUID2) response = webob.Response(request=request) result = {'members': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_show(self): expected = { 'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'schema': '/v2/schemas/member', } request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] self.serializer.show(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_create(self): expected = {'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'schema': '/v2/schemas/member', 'created_at': ISOTIME, 'updated_at': ISOTIME} request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] self.serializer.create(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_update(self): expected = {'image_id': UUID2, 'member_id': TENANT1, 'status': 'accepted', 'schema': '/v2/schemas/member', 'created_at': ISOTIME, 'updated_at': ISOTIME} request = webob.Request.blank('/v2/images/%s/members/%s' % (UUID2, TENANT1)) response = webob.Response(request=request) result = self.fixtures[0] 
self.serializer.update(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) class TestImagesDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializer, self).setUp() self.deserializer = glance.api.v2.image_members.RequestDeserializer() def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'member': TENANT1}) output = self.deserializer.create(request) expected = {'member_id': TENANT1} self.assertEqual(expected, output) def test_create_invalid(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'mem': TENANT1}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_member_empty(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'member': ''}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_list_return_error(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes([TENANT1]) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update_list_return_error(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes([TENANT1]) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'status': 'accepted'}) output = self.deserializer.update(request) expected = {'status': 'accepted'} self.assertEqual(expected, output) def test_update_invalid(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'mem': TENANT1}) 
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) glance-12.0.0/glance/tests/unit/v2/test_image_data_resource.py0000664000567000056710000006624012701407047025472 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import glance_store import mock import six import webob import glance.api.policy import glance.api.v2.image_data from glance.common import exception from glance.common import wsgi from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils class Raise(object): def __init__(self, exc): self.exc = exc def __call__(self, *args, **kwargs): raise self.exc class FakeImage(object): def __init__(self, image_id=None, data=None, checksum=None, size=0, virtual_size=0, locations=None, container_format='bear', disk_format='rawr', status=None): self.image_id = image_id self.data = data self.checksum = checksum self.size = size self.virtual_size = virtual_size self.locations = locations self.container_format = container_format self.disk_format = disk_format self._status = status @property def status(self): return self._status @status.setter def status(self, value): if isinstance(self._status, BaseException): raise self._status else: 
self._status = value def get_data(self, *args, **kwargs): return self.data def set_data(self, data, size=None): self.data = ''.join(data) self.size = size self.status = 'modified-by-fake' class FakeImageRepo(object): def __init__(self, result=None): self.result = result def get(self, image_id): if isinstance(self.result, BaseException): raise self.result else: return self.result def save(self, image, from_state=None): self.saved_image = image class FakeGateway(object): def __init__(self, repo): self.repo = repo def get_repo(self, context): return self.repo class TestImagesController(base.StoreClearingUnitTest): def setUp(self): super(TestImagesController, self).setUp() self.config(verbose=True, debug=True) self.image_repo = FakeImageRepo() self.gateway = FakeGateway(self.image_repo) self.controller = glance.api.v2.image_data.ImageDataController( gateway=self.gateway) def test_download(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd', locations=[{'url': 'http://example.com/image', 'metadata': {}, 'status': 'active'}]) self.image_repo.result = image image = self.controller.download(request, unit_test_utils.UUID1) self.assertEqual('abcd', image.image_id) def test_download_deactivated(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd', status='deactivated', locations=[{'url': 'http://example.com/image', 'metadata': {}, 'status': 'active'}]) self.image_repo.result = image self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, request, str(uuid.uuid4())) def test_download_no_location(self): # NOTE(mclaren): NoContent will be raised by the ResponseSerializer # That's tested below. 
request = unit_test_utils.get_fake_request() self.image_repo.result = FakeImage('abcd') image = self.controller.download(request, unit_test_utils.UUID2) self.assertEqual('abcd', image.image_id) def test_download_non_existent_image(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.NotFound() self.assertRaises(webob.exc.HTTPNotFound, self.controller.download, request, str(uuid.uuid4())) def test_download_forbidden(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.Forbidden() self.assertRaises(webob.exc.HTTPForbidden, self.controller.download, request, str(uuid.uuid4())) def test_download_ok_when_get_image_location_forbidden(self): class ImageLocations(object): def __len__(self): raise exception.Forbidden() request = unit_test_utils.get_fake_request() image = FakeImage('abcd') self.image_repo.result = image image.locations = ImageLocations() image = self.controller.download(request, unit_test_utils.UUID1) self.assertEqual('abcd', image.image_id) def test_upload(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd') self.image_repo.result = image self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) self.assertEqual('YYYY', image.data) self.assertEqual(4, image.size) def test_upload_status(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd') self.image_repo.result = image insurance = {'called': False} def read_data(): insurance['called'] = True self.assertEqual('saving', self.image_repo.saved_image.status) yield 'YYYY' self.controller.upload(request, unit_test_utils.UUID2, read_data(), None) self.assertTrue(insurance['called']) self.assertEqual('modified-by-fake', self.image_repo.saved_image.status) def test_upload_no_size(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd') self.image_repo.result = image self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', None) self.assertEqual('YYYY', image.data) 
self.assertIsNone(image.size) def test_upload_invalid(self): request = unit_test_utils.get_fake_request() image = FakeImage('abcd') image.status = ValueError() self.image_repo.result = image self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) def test_upload_with_expired_token(self): def side_effect(image, from_state=None): if from_state == 'saving': raise exception.NotAuthenticated() mocked_save = mock.Mock(side_effect=side_effect) mocked_delete = mock.Mock() request = unit_test_utils.get_fake_request() image = FakeImage('abcd') image.delete = mocked_delete self.image_repo.result = image self.image_repo.save = mocked_save self.assertRaises(webob.exc.HTTPUnauthorized, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertEqual(3, mocked_save.call_count) mocked_delete.assert_called_once_with() def test_upload_non_existent_image_during_save_initiates_deletion(self): def fake_save_not_found(self): raise exception.ImageNotFound() def fake_save_conflict(self): raise exception.Conflict() for fun in [fake_save_not_found, fake_save_conflict]: request = unit_test_utils.get_fake_request() image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fun image.delete = mock.Mock() self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) self.assertTrue(image.delete.called) def test_upload_non_existent_image_raises_image_not_found_exception(self): def fake_save(self): raise exception.ImageNotFound() def fake_delete(): raise exception.ImageNotFound() request = unit_test_utils.get_fake_request() image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fake_save image.delete = fake_delete self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def 
test_upload_non_existent_image_raises_store_not_found_exception(self): def fake_save(self): raise glance_store.NotFound() def fake_delete(): raise exception.ImageNotFound() request = unit_test_utils.get_fake_request() image = FakeImage('abcd', locations=['http://example.com/image']) self.image_repo.result = image self.image_repo.save = fake_save image.delete = fake_delete self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def test_upload_non_existent_image_before_save(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.NotFound() self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload, request, str(uuid.uuid4()), 'ABC', 3) def test_upload_data_exists(self): request = unit_test_utils.get_fake_request() image = FakeImage() exc = exception.InvalidImageStatusTransition(cur_status='active', new_status='queued') image.set_data = Raise(exc) self.image_repo.result = image self.assertRaises(webob.exc.HTTPConflict, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) def test_upload_storage_full(self): request = unit_test_utils.get_fake_request() image = FakeImage() image.set_data = Raise(glance_store.StorageFull) self.image_repo.result = image self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID2, 'YYYYYYY', 7) def test_upload_signature_verification_fails(self): request = unit_test_utils.get_fake_request() image = FakeImage() image.set_data = Raise(exception.SignatureVerificationError) self.image_repo.result = image self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload, request, unit_test_utils.UUID1, 'YYYY', 4) self.assertEqual('killed', self.image_repo.saved_image.status) def test_image_size_limit_exceeded(self): request = unit_test_utils.get_fake_request() image = FakeImage() image.set_data = Raise(exception.ImageSizeLimitExceeded) self.image_repo.result = image 
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID1, 'YYYYYYY', 7) def test_upload_storage_quota_full(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.StorageQuotaFull("message") self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.upload, request, unit_test_utils.UUID1, 'YYYYYYY', 7) def test_upload_storage_forbidden(self): request = unit_test_utils.get_fake_request(user=unit_test_utils.USER2) image = FakeImage() image.set_data = Raise(exception.Forbidden) self.image_repo.result = image self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) def test_upload_storage_internal_error(self): request = unit_test_utils.get_fake_request() self.image_repo.result = exception.ServerError() self.assertRaises(exception.ServerError, self.controller.upload, request, unit_test_utils.UUID1, 'ABC', 3) def test_upload_storage_write_denied(self): request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3) image = FakeImage() image.set_data = Raise(glance_store.StorageWriteDenied) self.image_repo.result = image self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) def test_upload_storage_store_disabled(self): """Test that uploading an image file raises StoreDisabled exception""" request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3) image = FakeImage() image.set_data = Raise(glance_store.StoreAddDisabled) self.image_repo.result = image self.assertRaises(webob.exc.HTTPGone, self.controller.upload, request, unit_test_utils.UUID2, 'YY', 2) @mock.patch("glance.common.trust_auth.TokenRefresher") def test_upload_with_trusts(self, mock_refresher): """Test that uploading with registry correctly uses trusts""" # initialize trust environment self.config(data_api='glance.db.registry.api') refresher = mock.MagicMock() mock_refresher.return_value 
= refresher refresher.refresh_token.return_value = "fake_token" # request an image upload request = unit_test_utils.get_fake_request() request.environ['keystone.token_auth'] = mock.MagicMock() request.environ['keystone.token_info'] = { 'token': { 'roles': [{'name': 'FakeRole', 'id': 'FakeID'}] } } image = FakeImage('abcd') self.image_repo.result = image mock_fake_save = mock.Mock() mock_fake_save.side_effect = [None, exception.NotAuthenticated, None] temp_save = FakeImageRepo.save # mocking save to raise NotAuthenticated on the second call FakeImageRepo.save = mock_fake_save self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) # check image data self.assertEqual('YYYY', image.data) self.assertEqual(4, image.size) FakeImageRepo.save = temp_save # check that token has been correctly acquired and deleted mock_refresher.assert_called_once_with( request.environ['keystone.token_auth'], request.context.tenant, ['FakeRole']) refresher.refresh_token.assert_called_once_with() refresher.release_resources.assert_called_once_with() self.assertEqual("fake_token", request.context.auth_token) @mock.patch("glance.common.trust_auth.TokenRefresher") def test_upload_with_trusts_fails(self, mock_refresher): """Test upload with registry if trust was not successfully created""" # initialize trust environment self.config(data_api='glance.db.registry.api') mock_refresher().side_effect = Exception() # request an image upload request = unit_test_utils.get_fake_request() image = FakeImage('abcd') self.image_repo.result = image self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) # check image data self.assertEqual('YYYY', image.data) self.assertEqual(4, image.size) # check that the token has not been updated self.assertEqual(0, mock_refresher().refresh_token.call_count) def _test_upload_download_prepare_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = 
self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() prepare_payload = output['meta'].copy() prepare_payload['checksum'] = None prepare_payload['size'] = None prepare_payload['virtual_size'] = None prepare_payload['location'] = None prepare_payload['status'] = 'queued' del prepare_payload['updated_at'] prepare_log = { 'notification_type': "INFO", 'event_type': "image.prepare", 'payload': prepare_payload, } self.assertEqual(3, len(output_log)) prepare_updated_at = output_log[0]['payload']['updated_at'] del output_log[0]['payload']['updated_at'] self.assertLessEqual(prepare_updated_at, output['meta']['updated_at']) self.assertEqual(prepare_log, output_log[0]) def _test_upload_download_upload_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() upload_payload = output['meta'].copy() upload_log = { 'notification_type': "INFO", 'event_type': "image.upload", 'payload': upload_payload, } self.assertEqual(3, len(output_log)) self.assertEqual(upload_log, output_log[1]) def _test_upload_download_activate_notification(self): request = unit_test_utils.get_fake_request() self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4) output = self.controller.download(request, unit_test_utils.UUID2) output_log = self.notifier.get_logs() activate_payload = output['meta'].copy() activate_log = { 'notification_type': "INFO", 'event_type': "image.activate", 'payload': activate_payload, } self.assertEqual(3, len(output_log)) self.assertEqual(activate_log, output_log[2]) def test_restore_image_when_upload_failed(self): request = unit_test_utils.get_fake_request() image = FakeImage('fake') image.set_data = Raise(glance_store.StorageWriteDenied) self.image_repo.result = image self.assertRaises(webob.exc.HTTPServiceUnavailable, self.controller.upload, request, 
unit_test_utils.UUID2, 'ZZZ', 3) self.assertEqual('queued', self.image_repo.saved_image.status) class TestImageDataDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImageDataDeserializer, self).setUp() self.deserializer = glance.api.v2.image_data.RequestDeserializer() def test_upload(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' request.body = b'YYY' request.headers['Content-Length'] = 3 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 3} self.assertEqual(expected, output) def test_upload_chunked(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' # If we use body_file, webob assumes we want to do a chunked upload, # ignoring the Content-Length header request.body_file = six.StringIO('YYY') output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual('YYY', data.read()) expected = {'size': None} self.assertEqual(expected, output) def test_upload_chunked_with_content_length(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' request.body_file = six.BytesIO(b'YYY') # The deserializer shouldn't care if the Content-Length is # set when the user is attempting to send chunked data. request.headers['Content-Length'] = 3 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 3} self.assertEqual(expected, output) def test_upload_with_incorrect_content_length(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-stream' # The deserializer shouldn't care if the Content-Length and # actual request body length differ. 
That job is left up # to the controller request.body = b'YYY' request.headers['Content-Length'] = 4 output = self.deserializer.upload(request) data = output.pop('data') self.assertEqual(b'YYY', data.read()) expected = {'size': 4} self.assertEqual(expected, output) def test_upload_wrong_content_type(self): request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/json' request.body = b'YYYYY' self.assertRaises(webob.exc.HTTPUnsupportedMediaType, self.deserializer.upload, request) request = unit_test_utils.get_fake_request() request.headers['Content-Type'] = 'application/octet-st' request.body = b'YYYYY' self.assertRaises(webob.exc.HTTPUnsupportedMediaType, self.deserializer.upload, request) class TestImageDataSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImageDataSerializer, self).setUp() self.serializer = glance.api.v2.image_data.ResponseSerializer() def test_download(self): request = wsgi.Request.blank('/') request.environ = {} response = webob.Response() response.request = request image = FakeImage(size=3, data=[b'Z', b'Z', b'Z']) self.serializer.download(response, image) self.assertEqual(b'ZZZ', response.body) self.assertEqual('3', response.headers['Content-Length']) self.assertNotIn('Content-MD5', response.headers) self.assertEqual('application/octet-stream', response.headers['Content-Type']) def test_download_with_checksum(self): request = wsgi.Request.blank('/') request.environ = {} response = webob.Response() response.request = request checksum = '0745064918b49693cca64d6b6a13d28a' image = FakeImage(size=3, checksum=checksum, data=[b'Z', b'Z', b'Z']) self.serializer.download(response, image) self.assertEqual(b'ZZZ', response.body) self.assertEqual('3', response.headers['Content-Length']) self.assertEqual(checksum, response.headers['Content-MD5']) self.assertEqual('application/octet-stream', response.headers['Content-Type']) def test_download_forbidden(self): """Make sure the serializer can return 403 
forbidden error instead of 500 internal server error. """ def get_data(*args, **kwargs): raise exception.Forbidden() self.stubs.Set(glance.api.policy.ImageProxy, 'get_data', get_data) request = wsgi.Request.blank('/') request.environ = {} response = webob.Response() response.request = request image = FakeImage(size=3, data=iter('ZZZ')) image.get_data = get_data self.assertRaises(webob.exc.HTTPForbidden, self.serializer.download, response, image) def test_download_no_content(self): """Test image download returns HTTPNoContent Make sure that serializer returns 204 no content error in case of image data is not available at specified location. """ with mock.patch.object(glance.api.policy.ImageProxy, 'get_data') as mock_get_data: mock_get_data.side_effect = glance_store.NotFound(image="image") request = wsgi.Request.blank('/') response = webob.Response() response.request = request image = FakeImage(size=3, data=iter('ZZZ')) image.get_data = mock_get_data self.assertRaises(webob.exc.HTTPNoContent, self.serializer.download, response, image) def test_download_service_unavailable(self): """Test image download returns HTTPServiceUnavailable.""" with mock.patch.object(glance.api.policy.ImageProxy, 'get_data') as mock_get_data: mock_get_data.side_effect = glance_store.RemoteServiceUnavailable() request = wsgi.Request.blank('/') response = webob.Response() response.request = request image = FakeImage(size=3, data=iter('ZZZ')) image.get_data = mock_get_data self.assertRaises(webob.exc.HTTPServiceUnavailable, self.serializer.download, response, image) def test_download_store_get_not_support(self): """Test image download returns HTTPBadRequest. Make sure that serializer returns 400 bad request error in case of getting images from this store is not supported at specified location. 
""" with mock.patch.object(glance.api.policy.ImageProxy, 'get_data') as mock_get_data: mock_get_data.side_effect = glance_store.StoreGetNotSupported() request = wsgi.Request.blank('/') response = webob.Response() response.request = request image = FakeImage(size=3, data=iter('ZZZ')) image.get_data = mock_get_data self.assertRaises(webob.exc.HTTPBadRequest, self.serializer.download, response, image) def test_download_store_random_get_not_support(self): """Test image download returns HTTPBadRequest. Make sure that serializer returns 400 bad request error in case of getting randomly images from this store is not supported at specified location. """ with mock.patch.object(glance.api.policy.ImageProxy, 'get_data') as m_get_data: err = glance_store.StoreRandomGetNotSupported(offset=0, chunk_size=0) m_get_data.side_effect = err request = wsgi.Request.blank('/') response = webob.Response() response.request = request image = FakeImage(size=3, data=iter('ZZZ')) image.get_data = m_get_data self.assertRaises(webob.exc.HTTPBadRequest, self.serializer.download, response, image) def test_upload(self): request = webob.Request.blank('/') request.environ = {} response = webob.Response() response.request = request self.serializer.upload(response, {}) self.assertEqual(204, response.status_int) self.assertEqual('0', response.headers['Content-Length']) glance-12.0.0/glance/tests/unit/v2/test_registry_api.py0000664000567000056710000016340612701407047024213 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_config import cfg from oslo_serialization import jsonutils import routes import six import webob import glance.api.common import glance.common.config from glance.common import timeutils import glance.context from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import models as db_models from glance.registry.api import v2 as rserver from glance.tests.unit import base from glance.tests import utils as test_utils CONF = cfg.CONF _gen_uuid = lambda: str(uuid.uuid4()) UUID1 = _gen_uuid() UUID2 = _gen_uuid() class TestRegistryRPC(base.IsolatedUnitTest): def setUp(self): super(TestRegistryRPC, self).setUp() self.mapper = routes.Mapper() self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=True) uuid1_time = timeutils.utcnow() uuid2_time = uuid1_time + datetime.timedelta(seconds=5) self.FIXTURES = [ {'id': UUID1, 'name': 'fake image #1', 'status': 'active', 'disk_format': 'ami', 'container_format': 'ami', 'is_public': False, 'created_at': uuid1_time, 'updated_at': uuid1_time, 'deleted_at': None, 'deleted': False, 'checksum': None, 'min_disk': 0, 'min_ram': 0, 'size': 13, 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), 'metadata': {}, 'status': 'active'}], 'properties': {'type': 'kernel'}}, {'id': UUID2, 'name': 'fake image #2', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'created_at': uuid2_time, 'updated_at': uuid2_time, 'deleted_at': None, 'deleted': False, 'checksum': None, 'min_disk': 5, 'min_ram': 256, 'size': 19, 'locations': 
[{'url': "file:///%s/%s" % (self.test_dir, UUID2), 'metadata': {}, 'status': 'active'}], 'properties': {}}] self.context = glance.context.RequestContext(is_admin=True) db_api.get_engine() self.destroy_fixtures() self.create_fixtures() def tearDown(self): """Clear the test environment""" super(TestRegistryRPC, self).tearDown() self.destroy_fixtures() def create_fixtures(self): for fixture in self.FIXTURES: db_api.image_create(self.context, fixture) # We write a fake image file to the filesystem with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image: image.write(b"chunk00000remainder") image.flush() def destroy_fixtures(self): # Easiest to just drop the models and re-create them... db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def _compare_images_and_uuids(self, uuids, images): self.assertListEqual(uuids, [image['id'] for image in images]) def test_show(self): """Tests that registry API endpoint returns the expected image.""" fixture = {'id': UUID2, 'name': 'fake image #2', 'size': 19, 'min_ram': 256, 'min_disk': 5, 'checksum': None} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get', 'kwargs': {'image_id': UUID2}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] image = res_dict for k, v in six.iteritems(fixture): self.assertEqual(v, image[k]) def test_show_unknown(self): """Tests the registry API endpoint returns 404 for an unknown id.""" req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get', 'kwargs': {'image_id': _gen_uuid()}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual('glance.common.exception.ImageNotFound', res_dict["_error"]["cls"]) def test_get_index(self): """Tests that the image_get_all command returns list of images.""" fixture = {'id': 
UUID2, 'name': 'fake image #2', 'size': 19, 'checksum': None} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': fixture}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(1, len(images)) for k, v in six.iteritems(fixture): self.assertEqual(v, images[0][k]) def test_get_index_marker(self): """Tests that the registry API returns list of public images. Must conforms to a marker query param. """ uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid4_time = uuid5_time + datetime.timedelta(seconds=5) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = {'id': UUID5, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! 
#123', 'size': 20, 'checksum': None, 'created_at': uuid5_time, 'updated_at': uuid5_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID4, "is_public": True}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] # should be sorted by created_at desc, id desc # page should start after marker 4 uuid_list = [UUID5, UUID2] self._compare_images_and_uuids(uuid_list, images) def test_get_index_marker_and_name_asc(self): """Test marker and null name ascending Tests that the registry API returns 200 when a marker and a null name are combined ascending order """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': None, 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['name'], 'sort_dir': ['asc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(2, len(images)) def test_get_index_marker_and_name_desc(self): """Test marker and null name descending Tests that the registry API returns 200 when a marker and a null name are combined descending order """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': None, 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['name'], 'sort_dir': ['desc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = 
req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) def test_get_index_marker_and_disk_format_asc(self): """Test marker and null disk format ascending Tests that the registry API returns 200 when a marker and a null disk_format are combined ascending order """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': None, 'container_format': 'ovf', 'name': 'Fake image', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'], 'sort_dir': ['asc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(2, len(images)) def test_get_index_marker_and_disk_format_desc(self): """Test marker and null disk format descending Tests that the registry API returns 200 when a marker and a null disk_format are combined descending order """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': None, 'container_format': 'ovf', 'name': 'Fake image', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'], 'sort_dir': ['desc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) def test_get_index_marker_and_container_format_asc(self): """Test marker and null container format ascending Tests that the registry API returns 200 when a marker and a null container_format are combined ascending order """ UUID3 = _gen_uuid() 
extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': None, 'name': 'Fake image', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['container_format'], 'sort_dir': ['asc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(2, len(images)) def test_get_index_marker_and_container_format_desc(self): """Test marker and null container format descending Tests that the registry API returns 200 when a marker and a null container_format are combined descending order """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': None, 'name': 'Fake image', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'sort_key': ['container_format'], 'sort_dir': ['desc']}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) def test_get_index_unknown_marker(self): """Tests the registry API returns a NotFound with unknown marker.""" req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': _gen_uuid()}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) result = jsonutils.loads(res.body)[0] self.assertIn("_error", result) self.assertIn("NotFound", result["_error"]["cls"]) def test_get_index_limit(self): """Tests that the registry API returns list of public images. Must conforms to a limit query param. 
""" uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid4_time = uuid3_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'limit': 1}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) images = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) self._compare_images_and_uuids([UUID4], images) def test_get_index_limit_marker(self): """Tests that the registry API returns list of public images. Must conforms to limit and marker query params. """ uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid4_time = uuid3_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) extra_fixture = {'id': _gen_uuid(), 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! 
#123', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'marker': UUID3, 'limit': 1}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) images = res_dict self._compare_images_and_uuids([UUID2], images) def test_get_index_filter_name(self): """Tests that the registry API returns list of public images. Use a specific name. This is really a sanity check, filtering is tested more in-depth using /images/detail """ extra_fixture = {'id': _gen_uuid(), 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) extra_fixture = {'id': _gen_uuid(), 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'name': 'new name! #123'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) images = res_dict self.assertEqual(2, len(images)) for image in images: self.assertEqual('new name! #123', image['name']) def test_get_index_filter_on_user_defined_properties(self): """Tests that the registry API returns list of public images. Use a specific user-defined properties. 
""" properties = {'distro': 'ubuntu', 'arch': 'i386', 'type': 'kernel'} extra_id = _gen_uuid() extra_fixture = {'id': extra_id, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'image-extra-1', 'size': 19, 'properties': properties, 'checksum': None} db_api.image_create(self.context, extra_fixture) # testing with a common property. req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'kernel'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(2, len(images)) self.assertEqual(extra_id, images[0]['id']) self.assertEqual(UUID1, images[1]['id']) # testing with a non-existent value for a common property. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) # testing with a non-existent value for a common property. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) # testing with a non-existent property. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'poo': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) # testing with multiple existing properties. 
cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'kernel', 'distro': 'ubuntu'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(1, len(images)) self.assertEqual(extra_id, images[0]['id']) # testing with multiple existing properties but non-existent values. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'random', 'distro': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) # testing with multiple non-existing properties. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'typo': 'random', 'poo': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) # testing with one existing property and the other non-existing. cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'type': 'kernel', 'poo': 'random'}}, }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)[0] self.assertEqual(0, len(images)) def test_get_index_sort_default_created_at_desc(self): """Tests that the registry API returns list of public images. Must conforms to a default sort key/dir. """ uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid4_time = uuid5_time + datetime.timedelta(seconds=5) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! 
#123', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = {'id': UUID5, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 20, 'checksum': None, 'created_at': uuid5_time, 'updated_at': uuid5_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) images = res_dict # (flaper87)registry's v1 forced is_public to True # when no value was specified. This is not # the default behaviour anymore. uuid_list = [UUID3, UUID4, UUID5, UUID2, UUID1] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_name_asc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by name in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = {'id': UUID5, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': None, 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['name'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_status_desc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by status in descending order. 
""" uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'queued', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['status'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID1, UUID2, UUID4, UUID3] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_disk_format_asc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by disk_format in ascending order. 
""" uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'ami', 'container_format': 'ami', 'name': 'asdf', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vdi', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['disk_format'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID1, UUID3, UUID4, UUID2] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_container_format_desc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by container_format in descending order. 
""" uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'ami', 'container_format': 'ami', 'name': 'asdf', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'iso', 'container_format': 'bare', 'name': 'xyz', 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['container_format'], 'sort_dir': ['desc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID2, UUID4, UUID3, UUID1] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_size_asc(self): """Tests that the registry API returns list of public images. Must be sorted by size in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'ami', 'container_format': 'ami', 'name': 'asdf', 'size': 100, 'checksum': None} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'iso', 'container_format': 'bare', 'name': 'xyz', 'size': 2, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['size'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID4, UUID1, UUID2, UUID3] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_created_at_asc(self): """Tests that the registry API returns list of public images. Must be sorted by created_at in ascending order. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None, 'created_at': uuid3_time, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! 
#123', 'size': 20, 'checksum': None, 'created_at': uuid4_time, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['created_at'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID1, UUID2, UUID4, UUID3] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_updated_at_desc(self): """Tests that the registry API returns list of public images. Must be sorted by updated_at in descending order. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! #123', 'size': 19, 'checksum': None, 'created_at': None, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'new name! 
#123', 'size': 20, 'checksum': None, 'created_at': None, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['updated_at'], 'sort_dir': ['desc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID3, UUID4, UUID2, UUID1] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_multiple_keys_one_sort_dir(self): """ Tests that the registry API returns list of public images sorted by name-size and size-name with ascending sort direction. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 19, 'checksum': None, 'created_at': None, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None, 'created_at': None, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = {'id': UUID5, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 20, 'checksum': None, 'created_at': None, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['name', 'size'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] 
images = res_dict uuid_list = [UUID3, UUID5, UUID1, UUID2, UUID4] self._compare_images_and_uuids(uuid_list, images) cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['size', 'name'], 'sort_dir': ['asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID1, UUID3, UUID2, UUID5, UUID4] self._compare_images_and_uuids(uuid_list, images) def test_get_index_sort_multiple_keys_multiple_sort_dirs(self): """ Tests that the registry API returns list of public images sorted by name-size and size-name with ascending and descending directions. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 19, 'checksum': None, 'created_at': None, 'updated_at': uuid3_time} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None, 'created_at': None, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = {'id': UUID5, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 20, 'checksum': None, 'created_at': None, 'updated_at': uuid4_time} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = 
[UUID4, UUID2, UUID1, UUID3, UUID5] self._compare_images_and_uuids(uuid_list, images) cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['size', 'name'], 'sort_dir': ['desc', 'asc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID5, UUID4, UUID3, UUID2, UUID1] self._compare_images_and_uuids(uuid_list, images) cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['name', 'size'], 'sort_dir': ['asc', 'desc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4] self._compare_images_and_uuids(uuid_list, images) cmd = [{ 'command': 'image_get_all', 'kwargs': {'sort_key': ['size', 'name'], 'sort_dir': ['asc', 'desc']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] images = res_dict uuid_list = [UUID1, UUID2, UUID3, UUID4, UUID5] self._compare_images_and_uuids(uuid_list, images) def test_create_image(self): """Tests that the registry API creates the image""" fixture = {'name': 'fake public image', 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] for k, v in six.iteritems(fixture): self.assertEqual(v, res_dict[k]) # Test status was updated properly self.assertEqual('active', res_dict['status']) def test_create_image_with_min_disk(self): """Tests that the registry API creates the image""" fixture = {'name': 'fake public image', 'is_public': 
True, 'status': 'active', 'min_disk': 5, 'disk_format': 'vhd', 'container_format': 'ovf'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(fixture['min_disk'], res_dict['min_disk']) def test_create_image_with_min_ram(self): """Tests that the registry API creates the image""" fixture = {'name': 'fake public image', 'is_public': True, 'status': 'active', 'min_ram': 256, 'disk_format': 'vhd', 'container_format': 'ovf'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(fixture['min_ram'], res_dict['min_ram']) def test_create_image_with_min_ram_default(self): """Tests that the registry API creates the image""" fixture = {'name': 'fake public image', 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(0, res_dict['min_ram']) def test_create_image_with_min_disk_default(self): """Tests that the registry API creates the image""" fixture = {'name': 'fake public image', 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_create', 'kwargs': {'values': fixture} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, 
res.status_int) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(0, res_dict['min_disk']) def test_update_image(self): """Tests that the registry API updates the image""" fixture = {'name': 'fake public image #2', 'min_disk': 5, 'min_ram': 256, 'disk_format': 'raw'} req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_update', 'kwargs': {'values': fixture, 'image_id': UUID2} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body)[0] self.assertNotEqual(res_dict['created_at'], res_dict['updated_at']) for k, v in six.iteritems(fixture): self.assertEqual(v, res_dict[k]) def _send_request(self, command, kwargs, method): req = webob.Request.blank('/rpc') req.method = method cmd = [{'command': command, 'kwargs': kwargs}] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] return res.status_int, res_dict def _expect_fail(self, command, kwargs, error_cls, method='POST'): # on any exception status_int is always 200, so have to check _error # dict code, res_dict = self._send_request(command, kwargs, method) self.assertIn('_error', res_dict) self.assertEqual(error_cls, res_dict['_error']['cls']) return res_dict def _expect_ok(self, command, kwargs, method, expected_status=200): code, res_dict = self._send_request(command, kwargs) self.assertEqual(expected_status, code) return res_dict def test_create_image_bad_name(self): fixture = {'name': u'A bad name \U0001fff2', 'status': 'queued'} self._expect_fail('image_create', {'values': fixture}, 'glance.common.exception.Invalid') def test_create_image_bad_location(self): fixture = {'status': 'queued', 'locations': [{'url': u'file:///tmp/tests/\U0001fee2', 'metadata': {}, 'status': 'active'}]} self._expect_fail('image_create', {'values': fixture}, 'glance.common.exception.Invalid') def test_create_image_bad_property(self): fixture = {'status': 
'queued', 'properties': {'ok key': u' bad value \U0001f2aa'}} self._expect_fail('image_create', {'values': fixture}, 'glance.common.exception.Invalid') fixture = {'status': 'queued', 'properties': {u'invalid key \U00010020': 'ok value'}} self._expect_fail('image_create', {'values': fixture}, 'glance.common.exception.Invalid') def test_update_image_bad_tag(self): self._expect_fail('image_tag_create', {'value': u'\U0001fff2', 'image_id': UUID2}, 'glance.common.exception.Invalid') def test_update_image_bad_name(self): fixture = {'name': u'A bad name \U0001fff2'} self._expect_fail('image_update', {'values': fixture, 'image_id': UUID1}, 'glance.common.exception.Invalid') def test_update_image_bad_location(self): fixture = {'locations': [{'url': u'file:///tmp/glance-tests/\U0001fee2', 'metadata': {}, 'status': 'active'}]} self._expect_fail('image_update', {'values': fixture, 'image_id': UUID1}, 'glance.common.exception.Invalid') def test_update_bad_property(self): fixture = {'properties': {'ok key': u' bad value \U0001f2aa'}} self._expect_fail('image_update', {'values': fixture, 'image_id': UUID2}, 'glance.common.exception.Invalid') fixture = {'properties': {u'invalid key \U00010020': 'ok value'}} self._expect_fail('image_update', {'values': fixture, 'image_id': UUID2}, 'glance.common.exception.Invalid') def test_delete_image(self): """Tests that the registry API deletes the image""" # Grab the original number of images req = webob.Request.blank('/rpc') req.method = "POST" cmd = [{ 'command': 'image_get_all', 'kwargs': {'filters': {'deleted': False}} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) orig_num_images = len(res_dict) # Delete image #2 cmd = [{ 'command': 'image_destroy', 'kwargs': {'image_id': UUID2} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify one less image cmd = [{ 'command': 
'image_get_all', 'kwargs': {'filters': {'deleted': False}} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) res_dict = jsonutils.loads(res.body)[0] self.assertEqual(200, res.status_int) new_num_images = len(res_dict) self.assertEqual(new_num_images, orig_num_images - 1) def test_delete_image_response(self): """Tests that the registry API delete returns the image metadata""" image = self.FIXTURES[0] req = webob.Request.blank('/rpc') req.method = 'POST' cmd = [{ 'command': 'image_destroy', 'kwargs': {'image_id': image['id']} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) deleted_image = jsonutils.loads(res.body)[0] self.assertEqual(image['id'], deleted_image['id']) self.assertTrue(deleted_image['deleted']) self.assertTrue(deleted_image['deleted_at']) def test_get_image_members(self): """Tests members listing for existing images.""" req = webob.Request.blank('/rpc') req.method = 'POST' cmd = [{ 'command': 'image_member_find', 'kwargs': {'image_id': UUID2} }] req.body = jsonutils.dump_as_bytes(cmd) res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body)[0] self.assertEqual(0, len(memb_list)) glance-12.0.0/glance/tests/unit/v2/test_registry_client.py0000664000567000056710000007403112701407047024713 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Tests for Glance Registry's client. This tests are temporary and will be removed once the registry's driver tests will be added. """ import copy import datetime import os import uuid from mock import patch from six.moves import reload_module from glance.common import config from glance.common import exception from glance.common import timeutils from glance import context from glance.db.sqlalchemy import api as db_api from glance.i18n import _ from glance.registry.api import v2 as rserver import glance.registry.client.v2.api as rapi from glance.registry.client.v2.api import client as rclient from glance.tests.unit import base from glance.tests import utils as test_utils _gen_uuid = lambda: str(uuid.uuid4()) UUID1 = str(uuid.uuid4()) UUID2 = str(uuid.uuid4()) # NOTE(bcwaldon): needed to init config_dir cli opt config.parse_args(args=[]) class TestRegistryV2Client(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): """Test proper actions made against a registry service. Test for both valid and invalid requests. """ # Registry server to user # in the stub. 
registry = rserver def setUp(self): """Establish a clean test environment""" super(TestRegistryV2Client, self).setUp() db_api.get_engine() self.context = context.RequestContext(is_admin=True) uuid1_time = timeutils.utcnow() uuid2_time = uuid1_time + datetime.timedelta(seconds=5) self.FIXTURES = [ self.get_extra_fixture( id=UUID1, name='fake image #1', is_public=False, disk_format='ami', container_format='ami', size=13, virtual_size=26, properties={'type': 'kernel'}, location="swift://user:passwd@acct/container/obj.tar.0", created_at=uuid1_time), self.get_extra_fixture(id=UUID2, name='fake image #2', properties={}, size=19, virtual_size=38, location="file:///tmp/glance-tests/2", created_at=uuid2_time)] self.destroy_fixtures() self.create_fixtures() self.client = rclient.RegistryClient("0.0.0.0") def tearDown(self): """Clear the test environment""" super(TestRegistryV2Client, self).tearDown() self.destroy_fixtures() def test_image_get_index(self): """Test correct set of public image returned""" images = self.client.image_get_all() self.assertEqual(2, len(images)) def test_create_image_with_null_min_disk_min_ram(self): UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None, min_ram=None) db_api.image_create(self.context, extra_fixture) image = self.client.image_get(image_id=UUID3) self.assertEqual(0, image["min_ram"]) self.assertEqual(0, image["min_disk"]) def test_get_index_sort_name_asc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by name in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['name'], sort_dir=['asc']) self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4), unjsonify=False) def test_get_index_sort_status_desc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by status in descending order. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', status='queued') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', created_at=uuid4_time) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['status'], sort_dir=['desc']) self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1), unjsonify=False) def test_get_index_sort_disk_format_asc(self): """Tests that the registry API returns list of public images. Must besorted alphabetically by disk_format in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', disk_format='vdi') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['disk_format'], sort_dir=['asc']) self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2), unjsonify=False) def test_get_index_sort_container_format_desc(self): """Tests that the registry API returns list of public images. Must be sorted alphabetically by container_format in descending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', disk_format='iso', container_format='bare') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['container_format'], sort_dir=['desc']) self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1), unjsonify=False) def test_get_index_sort_size_asc(self): """Tests that the registry API returns list of public images. Must be sorted by size in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami', size=100, virtual_size=200) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='asdf', disk_format='iso', container_format='bare', size=2, virtual_size=4) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['size'], sort_dir=['asc']) self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3), unjsonify=False) def test_get_index_sort_created_at_asc(self): """Tests that the registry API returns list of public images. Must be sorted by created_at in ascending order. 
""" uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['created_at'], sort_dir=['asc']) self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3), unjsonify=False) def test_get_index_sort_updated_at_desc(self): """Tests that the registry API returns list of public images. Must be sorted by updated_at in descending order. """ uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, created_at=None, updated_at=uuid3_time) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=None, updated_at=uuid4_time) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['updated_at'], sort_dir=['desc']) self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1), unjsonify=False) def test_get_image_details_sort_multiple_keys(self): """ Tests that a detailed call returns list of public images sorted by name-size and size-name in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name=u'xyz', size=20) db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, name=u'asdf', size=20) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['name', 'size'], sort_dir=['asc']) self.assertEqualImages(images, (UUID3, UUID5, UUID1, UUID2, UUID4), unjsonify=False) images = self.client.image_get_all(sort_key=['size', 'name'], sort_dir=['asc']) self.assertEqualImages(images, (UUID1, UUID3, UUID2, UUID5, UUID4), unjsonify=False) def test_get_image_details_sort_multiple_dirs(self): """ Tests that a detailed call returns list of public images sorted by name-size and size-name in ascending and descending orders. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', size=20) db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, name='asdf', size=20) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(sort_key=['name', 'size'], sort_dir=['asc', 'desc']) self.assertEqualImages(images, (UUID5, UUID3, UUID1, UUID2, UUID4), unjsonify=False) images = self.client.image_get_all(sort_key=['name', 'size'], sort_dir=['desc', 'asc']) self.assertEqualImages(images, (UUID4, UUID2, UUID1, UUID3, UUID5), unjsonify=False) images = self.client.image_get_all(sort_key=['size', 'name'], sort_dir=['asc', 'desc']) self.assertEqualImages(images, (UUID1, UUID2, UUID3, UUID4, UUID5), unjsonify=False) images = self.client.image_get_all(sort_key=['size', 'name'], sort_dir=['desc', 'asc']) self.assertEqualImages(images, (UUID5, UUID4, UUID3, UUID2, 
UUID1), unjsonify=False) def test_image_get_index_marker(self): """Test correct set of images returned with marker param.""" uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', status='saving', created_at=uuid3_time) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', status='saving', created_at=uuid4_time) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(marker=UUID3) self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False) def test_image_get_index_limit(self): """Test correct number of images returned with limit param.""" extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123', status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #125', status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(limit=2) self.assertEqual(2, len(images)) def test_image_get_index_marker_limit(self): """Test correct set of images returned with marker/limit params.""" uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10) uuid3_time = uuid4_time + datetime.timedelta(seconds=5) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', status='saving', created_at=uuid3_time) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='new name! 
#125', status='saving', created_at=uuid4_time) db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(marker=UUID4, limit=1) self.assertEqualImages(images, (UUID2,), unjsonify=False) def test_image_get_index_limit_None(self): """Test correct set of images returned with limit param == None.""" extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123', status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #125', status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(limit=None) self.assertEqual(4, len(images)) def test_image_get_index_by_name(self): """Test correct set of public, name-filtered image returned. This is just a sanity check, we test the details call more in-depth. """ extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') db_api.image_create(self.context, extra_fixture) images = self.client.image_get_all(filters={'name': 'new name! #123'}) self.assertEqual(1, len(images)) for image in images: self.assertEqual('new name! 
#123', image['name']) def test_image_get_is_public_v2(self): """Tests that a detailed call can be filtered by a property""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', properties={'is_public': 'avalue'}) context = copy.copy(self.context) db_api.image_create(context, extra_fixture) filters = {'is_public': 'avalue'} images = self.client.image_get_all(filters=filters) self.assertEqual(1, len(images)) for image in images: self.assertEqual('avalue', image['properties'][0]['value']) def test_image_get(self): """Tests that the detailed info about an image returned""" fixture = self.get_fixture(id=UUID1, name='fake image #1', is_public=False, size=13, virtual_size=26, disk_format='ami', container_format='ami') data = self.client.image_get(image_id=UUID1) for k, v in fixture.items(): el = data[k] self.assertEqual(v, data[k], "Failed v != data[k] where v = %(v)s and " "k = %(k)s and data[k] = %(el)s" % dict(v=v, k=k, el=el)) def test_image_get_non_existing(self): """Tests that NotFound is raised when getting a non-existing image""" self.assertRaises(exception.NotFound, self.client.image_get, image_id=_gen_uuid()) def test_image_create_basic(self): """Tests that we can add image metadata and returns the new id""" fixture = self.get_fixture() new_image = self.client.image_create(values=fixture) # Test all other attributes set data = self.client.image_get(image_id=new_image['id']) for k, v in fixture.items(): self.assertEqual(v, data[k]) # Test status was updated properly self.assertIn('status', data) self.assertEqual('active', data['status']) def test_image_create_with_properties(self): """Tests that we can add image metadata with properties""" fixture = self.get_fixture(location="file:///tmp/glance-tests/2", properties={'distro': 'Ubuntu 10.04 LTS'}) new_image = self.client.image_create(values=fixture) self.assertIn('properties', new_image) self.assertEqual(new_image['properties'][0]['value'], fixture['properties']['distro']) del fixture['location'] del 
fixture['properties'] for k, v in fixture.items(): self.assertEqual(v, new_image[k]) # Test status was updated properly self.assertIn('status', new_image.keys()) self.assertEqual('active', new_image['status']) def test_image_create_already_exists(self): """Tests proper exception is raised if image with ID already exists""" fixture = self.get_fixture(id=UUID2, location="file:///tmp/glance-tests/2") self.assertRaises(exception.Duplicate, self.client.image_create, values=fixture) def test_image_create_with_bad_status(self): """Tests proper exception is raised if a bad status is set""" fixture = self.get_fixture(status='bad status', location="file:///tmp/glance-tests/2") self.assertRaises(exception.Invalid, self.client.image_create, values=fixture) def test_image_update(self): """Tests that the registry API updates the image""" fixture = {'name': 'fake public image #2', 'disk_format': 'vmdk', 'status': 'saving'} self.assertTrue(self.client.image_update(image_id=UUID2, values=fixture)) # Test all other attributes set data = self.client.image_get(image_id=UUID2) for k, v in fixture.items(): self.assertEqual(v, data[k]) def test_image_update_conflict(self): """Tests that the registry API updates the image""" next_state = 'saving' fixture = {'name': 'fake public image #2', 'disk_format': 'vmdk', 'status': next_state} image = self.client.image_get(image_id=UUID2) current = image['status'] self.assertEqual('active', current) # image is in 'active' state so this should cause a failure. 
from_state = 'saving' self.assertRaises(exception.Conflict, self.client.image_update, image_id=UUID2, values=fixture, from_state=from_state) try: self.client.image_update(image_id=UUID2, values=fixture, from_state=from_state) except exception.Conflict as exc: msg = (_('cannot transition from %(current)s to ' '%(next)s in update (wanted ' 'from_state=%(from)s)') % {'current': current, 'next': next_state, 'from': from_state}) self.assertEqual(str(exc), msg) def test_image_update_with_invalid_min_disk(self): """Tests that the registry API updates the image""" next_state = 'saving' fixture = {'name': 'fake image', 'disk_format': 'vmdk', 'min_disk': 2 ** 31 + 1, 'status': next_state} image = self.client.image_get(image_id=UUID2) current = image['status'] self.assertEqual('active', current) # image is in 'active' state so this should cause a failure. from_state = 'saving' self.assertRaises(exception.Invalid, self.client.image_update, image_id=UUID2, values=fixture, from_state=from_state) def test_image_update_with_invalid_min_ram(self): """Tests that the registry API updates the image""" next_state = 'saving' fixture = {'name': 'fake image', 'disk_format': 'vmdk', 'min_ram': 2 ** 31 + 1, 'status': next_state} image = self.client.image_get(image_id=UUID2) current = image['status'] self.assertEqual('active', current) # image is in 'active' state so this should cause a failure. 
from_state = 'saving' self.assertRaises(exception.Invalid, self.client.image_update, image_id=UUID2, values=fixture, from_state=from_state) def _test_image_update_not_existing(self): """Tests non existing image update doesn't work""" fixture = self.get_fixture(status='bad status') self.assertRaises(exception.NotFound, self.client.image_update, image_id=_gen_uuid(), values=fixture) def test_image_destroy(self): """Tests that image metadata is deleted properly""" # Grab the original number of images orig_num_images = len(self.client.image_get_all()) # Delete image #2 image = self.FIXTURES[1] deleted_image = self.client.image_destroy(image_id=image['id']) self.assertTrue(deleted_image) self.assertEqual(image['id'], deleted_image['id']) self.assertTrue(deleted_image['deleted']) self.assertTrue(deleted_image['deleted_at']) # Verify one less image filters = {'deleted': False} new_num_images = len(self.client.image_get_all(filters=filters)) self.assertEqual(new_num_images, orig_num_images - 1) def test_image_destroy_not_existing(self): """Tests cannot delete non-existing image""" self.assertRaises(exception.NotFound, self.client.image_destroy, image_id=_gen_uuid()) def test_image_get_members(self): """Tests getting image members""" memb_list = self.client.image_member_find(image_id=UUID2) num_members = len(memb_list) self.assertEqual(0, num_members) def test_image_get_members_not_existing(self): """Tests getting non-existent image members""" self.assertRaises(exception.NotFound, self.client.image_get_members, image_id=_gen_uuid()) def test_image_member_find(self): """Tests getting member images""" memb_list = self.client.image_member_find(member='pattieblack') num_members = len(memb_list) self.assertEqual(0, num_members) def test_image_member_find_include_deleted(self): """Tests getting member images include the delted member""" values = dict(image_id=UUID2, member='pattieblack') # create a member member = self.client.image_member_create(values=values) memb_list = 
self.client.image_member_find(member='pattieblack') memb_list2 = self.client.image_member_find(member='pattieblack', include_deleted=True) self.assertEqual(1, len(memb_list)) self.assertEqual(1, len(memb_list2)) # delete the member self.client.image_member_delete(memb_id=member['id']) memb_list = self.client.image_member_find(member='pattieblack') memb_list2 = self.client.image_member_find(member='pattieblack', include_deleted=True) self.assertEqual(0, len(memb_list)) self.assertEqual(1, len(memb_list2)) # create it again member = self.client.image_member_create(values=values) memb_list = self.client.image_member_find(member='pattieblack') memb_list2 = self.client.image_member_find(member='pattieblack', include_deleted=True) self.assertEqual(1, len(memb_list)) self.assertEqual(2, len(memb_list2)) def test_add_update_members(self): """Tests updating image members""" values = dict(image_id=UUID2, member='pattieblack') member = self.client.image_member_create(values=values) self.assertTrue(member) values['member'] = 'pattieblack2' self.assertTrue(self.client.image_member_update(memb_id=member['id'], values=values)) def test_add_delete_member(self): """Tests deleting image members""" values = dict(image_id=UUID2, member='pattieblack') member = self.client.image_member_create(values=values) self.client.image_member_delete(memb_id=member['id']) memb_list = self.client.image_member_find(member='pattieblack') self.assertEqual(0, len(memb_list)) class TestRegistryV2ClientApi(base.IsolatedUnitTest): """Test proper actions made against a registry service. Test for both valid and invalid requests. 
""" def setUp(self): """Establish a clean test environment""" super(TestRegistryV2ClientApi, self).setUp() reload_module(rapi) def tearDown(self): """Clear the test environment""" super(TestRegistryV2ClientApi, self).tearDown() def test_configure_registry_client_not_using_use_user_token(self): self.config(use_user_token=False) with patch.object(rapi, 'configure_registry_admin_creds') as mock_rapi: rapi.configure_registry_client() mock_rapi.assert_called_once_with() def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'): return { 'user': 'user', 'password': 'password', 'username': 'user', 'tenant': 'tenant', 'auth_url': auth_url, 'strategy': strategy, 'region': 'region' } def test_configure_registry_admin_creds(self): expected = self._get_fake_config_creds(auth_url=None, strategy='configured_strategy') self.config(admin_user=expected['user']) self.config(admin_password=expected['password']) self.config(admin_tenant_name=expected['tenant']) self.config(auth_strategy=expected['strategy']) self.config(auth_region=expected['region']) self.stubs.Set(os, 'getenv', lambda x: None) self.assertIsNone(rapi._CLIENT_CREDS) rapi.configure_registry_admin_creds() self.assertEqual(expected, rapi._CLIENT_CREDS) def test_configure_registry_admin_creds_with_auth_url(self): expected = self._get_fake_config_creds() self.config(admin_user=expected['user']) self.config(admin_password=expected['password']) self.config(admin_tenant_name=expected['tenant']) self.config(auth_url=expected['auth_url']) self.config(auth_strategy='test_strategy') self.config(auth_region=expected['region']) self.assertIsNone(rapi._CLIENT_CREDS) rapi.configure_registry_admin_creds() self.assertEqual(expected, rapi._CLIENT_CREDS) glance-12.0.0/glance/tests/unit/v2/test_images_resource.py0000664000567000056710000050551612701407051024663 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import glance_store as store import mock from oslo_config import cfg from oslo_serialization import jsonutils import six # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import testtools import webob import glance.api.v2.image_actions import glance.api.v2.images from glance.common import exception from glance import domain import glance.schema from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils DATETIME = datetime.datetime(2012, 5, 16, 15, 27, 36, 325355) ISOTIME = '2012-05-16T15:27:36Z' CONF = cfg.CONF BASE_URI = unit_test_utils.BASE_URI UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' CHKSUM = '93264c3edf5972c9f1cb309543d38a5c' CHKSUM1 = '43254c3edf6972c9f1cb309543d38a8c' def _db_fixture(id, **kwargs): obj = { 'id': id, 'name': None, 'is_public': False, 'properties': {}, 'checksum': None, 'owner': None, 'status': 'queued', 'tags': [], 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 
'deleted': False, 'min_ram': None, 'min_disk': None, } obj.update(kwargs) return obj def _domain_fixture(id, **kwargs): properties = { 'image_id': id, 'name': None, 'visibility': 'private', 'checksum': None, 'owner': None, 'status': 'queued', 'size': None, 'virtual_size': None, 'locations': [], 'protected': False, 'disk_format': None, 'container_format': None, 'min_ram': None, 'min_disk': None, 'tags': [], } properties.update(kwargs) return glance.domain.Image(**properties) def _db_image_member_fixture(image_id, member_id, **kwargs): obj = { 'image_id': image_id, 'member': member_id, } obj.update(kwargs) return obj class TestImagesController(base.IsolatedUnitTest): def setUp(self): super(TestImagesController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() for i in range(1, 4): self.store.data['%s/fake_location_%i' % (BASE_URI, i)] = ('Z', 1) self.store_utils = unit_test_utils.FakeStoreUtils(self.store) self._create_images() self._create_image_members() self.controller = glance.api.v2.images.ImagesController(self.db, self.policy, self.notifier, self.store) self.action_controller = (glance.api.v2.image_actions. 
ImageActionsController(self.db, self.policy, self.notifier, self.store)) self.controller.gateway.store_utils = self.store_utils store.create_stores() def _create_images(self): self.images = [ _db_fixture(UUID1, owner=TENANT1, checksum=CHKSUM, name='1', size=256, virtual_size=1024, is_public=True, locations=[{'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {}, 'status': 'active'}], disk_format='raw', container_format='bare', status='active'), _db_fixture(UUID2, owner=TENANT1, checksum=CHKSUM1, name='2', size=512, virtual_size=2048, is_public=True, disk_format='raw', container_format='bare', status='active', tags=['redhat', '64bit', 'power'], properties={'hypervisor_type': 'kvm', 'foo': 'bar', 'bar': 'foo'}), _db_fixture(UUID3, owner=TENANT3, checksum=CHKSUM1, name='3', size=512, virtual_size=2048, is_public=True, tags=['windows', '64bit', 'x86']), _db_fixture(UUID4, owner=TENANT4, name='4', size=1024, virtual_size=3072), ] [self.db.image_create(None, image) for image in self.images] self.db.image_tag_set_all(None, UUID1, ['ping', 'pong']) def _create_image_members(self): self.image_members = [ _db_image_member_fixture(UUID4, TENANT2), _db_image_member_fixture(UUID4, TENANT3, status='accepted'), ] [self.db.image_member_create(None, image_member) for image_member in self.image_members] def test_index(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_member_status_accepted(self): self.config(limit_param_default=5, api_limit_max=5) request = unit_test_utils.get_fake_request(tenant=TENANT2) output = self.controller.index(request) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) # can see only the public image 
self.assertEqual(expected, actual) request = unit_test_utils.get_fake_request(tenant=TENANT3) output = self.controller.index(request) self.assertEqual(4, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3, UUID4]) self.assertEqual(expected, actual) def test_index_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request) self.assertEqual(4, len(output['images'])) def test_index_admin_deleted_images_hidden(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) output = self.controller.index(request) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3, UUID4]) self.assertEqual(expected, actual) def test_index_return_parameters(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID3, limit=1, sort_key=['created_at'], sort_dir=['desc']) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2]) self.assertEqual(actual, expected) self.assertEqual(UUID2, output['next_marker']) def test_index_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID3, limit=2) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID1]) self.assertEqual(expected, actual) self.assertEqual(UUID1, output['next_marker']) def test_index_no_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request, marker=UUID1, limit=2) self.assertEqual(0, len(output['images'])) actual = set([image.image_id for image in 
output['images']]) expected = set([]) self.assertEqual(expected, actual) self.assertNotIn('next_marker', output) def test_index_with_id_filter(self): request = unit_test_utils.get_fake_request('/images?id=%s' % UUID1) output = self.controller.index(request, filters={'id': UUID1}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_checksum_filter_single_image(self): req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM) output = self.controller.index(req, filters={'checksum': CHKSUM}) self.assertEqual(1, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID1] self.assertEqual(expected, actual) def test_index_with_checksum_filter_multiple_images(self): req = unit_test_utils.get_fake_request('/images?checksum=%s' % CHKSUM1) output = self.controller.index(req, filters={'checksum': CHKSUM1}) self.assertEqual(2, len(output['images'])) actual = list([image.image_id for image in output['images']]) expected = [UUID3, UUID2] self.assertEqual(expected, actual) def test_index_with_non_existent_checksum(self): req = unit_test_utils.get_fake_request('/images?checksum=236231827') output = self.controller.index(req, filters={'checksum': '236231827'}) self.assertEqual(0, len(output['images'])) def test_index_size_max_filter(self): request = unit_test_utils.get_fake_request('/images?size_max=512') output = self.controller.index(request, filters={'size_max': 512}) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_size_min_filter(self): request = unit_test_utils.get_fake_request('/images?size_min=512') output = self.controller.index(request, filters={'size_min': 512}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in 
output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_size_range_filter(self): path = '/images?size_min=512&size_max=512' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'size_min': 512, 'size_max': 512}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_max_filter(self): ref = '/images?virtual_size_max=2048' request = unit_test_utils.get_fake_request(ref) output = self.controller.index(request, filters={'virtual_size_max': 2048}) self.assertEqual(3, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID1, UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_min_filter(self): ref = '/images?virtual_size_min=2048' request = unit_test_utils.get_fake_request(ref) output = self.controller.index(request, filters={'virtual_size_min': 2048}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_virtual_size_range_filter(self): path = '/images?virtual_size_min=512&virtual_size_max=2048' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'virtual_size_min': 2048, 'virtual_size_max': 2048}) self.assertEqual(2, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID2, UUID3]) self.assertEqual(expected, actual) def test_index_with_invalid_max_range_filter_value(self): request = unit_test_utils.get_fake_request('/images?size_max=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, filters={'size_max': 'blah'}) def test_index_with_filters_return_many(self): path = '/images?status=queued' request = 
unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'status': 'queued'}) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_with_nonexistent_name_filter(self): request = unit_test_utils.get_fake_request('/images?name=%s' % 'blah') images = self.controller.index(request, filters={'name': 'blah'})['images'] self.assertEqual(0, len(images)) def test_index_with_non_default_is_public_filter(self): image = _db_fixture(str(uuid.uuid4()), is_public=False, owner=TENANT3) self.db.image_create(None, image) path = '/images?visibility=private' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, filters={'visibility': 'private'}) self.assertEqual(2, len(output['images'])) def test_index_with_many_filters(self): url = '/images?status=queued&name=3' request = unit_test_utils.get_fake_request(url) output = self.controller.index(request, filters={ 'status': 'queued', 'name': '3', }) self.assertEqual(1, len(output['images'])) actual = set([image.image_id for image in output['images']]) expected = set([UUID3]) self.assertEqual(expected, actual) def test_index_with_marker(self): self.config(limit_param_default=1, api_limit_max=3) path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, marker=UUID3) actual = set([image.image_id for image in output['images']]) self.assertEqual(1, len(actual)) self.assertIn(UUID2, actual) def test_index_with_limit(self): path = '/images' limit = 2 request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, limit=limit) actual = set([image.image_id for image in output['images']]) self.assertEqual(limit, len(actual)) self.assertIn(UUID3, actual) self.assertIn(UUID2, actual) def test_index_greater_than_limit_max(self): self.config(limit_param_default=1, api_limit_max=3) 
path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, limit=4) actual = set([image.image_id for image in output['images']]) self.assertEqual(3, len(actual)) self.assertNotIn(output['next_marker'], output) def test_index_default_limit(self): self.config(limit_param_default=1, api_limit_max=3) path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request) actual = set([image.image_id for image in output['images']]) self.assertEqual(1, len(actual)) def test_index_with_sort_dir(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_dir=['asc'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID1, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID3, actual[2]) def test_index_with_sort_key(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_key=['created_at'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID3, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID1, actual[2]) def test_index_with_multiple_sort_keys(self): path = '/images' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, sort_key=['created_at', 'name'], limit=3) actual = [image.image_id for image in output['images']] self.assertEqual(3, len(actual)) self.assertEqual(UUID3, actual[0]) self.assertEqual(UUID2, actual[1]) self.assertEqual(UUID1, actual[2]) def test_index_with_marker_not_found(self): fake_uuid = str(uuid.uuid4()) path = '/images' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_index_invalid_sort_key(self): path = '/images' request = 
unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, sort_key=['foo']) def test_index_zero_images(self): self.db.reset() request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual([], output['images']) def test_index_with_tags(self): path = '/images?tag=64bit' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['64bit']}) actual = [image.tags for image in output['images']] self.assertEqual(2, len(actual)) self.assertIn('64bit', actual[0]) self.assertIn('64bit', actual[1]) def test_index_with_multi_tags(self): path = '/images?tag=power&tag=64bit' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['power', '64bit']}) actual = [image.tags for image in output['images']] self.assertEqual(1, len(actual)) self.assertIn('64bit', actual[0]) self.assertIn('power', actual[0]) def test_index_with_multi_tags_and_nonexistent(self): path = '/images?tag=power&tag=fake' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['power', 'fake']}) actual = [image.tags for image in output['images']] self.assertEqual(0, len(actual)) def test_index_with_tags_and_properties(self): path = '/images?tag=64bit&hypervisor_type=kvm' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['64bit'], 'hypervisor_type': 'kvm'}) tags = [image.tags for image in output['images']] properties = [image.extra_properties for image in output['images']] self.assertEqual(len(tags), len(properties)) self.assertIn('64bit', tags[0]) self.assertEqual('kvm', properties[0]['hypervisor_type']) def test_index_with_multiple_properties(self): path = '/images?foo=bar&hypervisor_type=kvm' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'foo': 'bar', 
'hypervisor_type': 'kvm'}) properties = [image.extra_properties for image in output['images']] self.assertEqual('kvm', properties[0]['hypervisor_type']) self.assertEqual('bar', properties[0]['foo']) def test_index_with_core_and_extra_property(self): path = '/images?disk_format=raw&foo=bar' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'foo': 'bar', 'disk_format': 'raw'}) properties = [image.extra_properties for image in output['images']] self.assertEqual(1, len(output['images'])) self.assertEqual('raw', output['images'][0].disk_format) self.assertEqual('bar', properties[0]['foo']) def test_index_with_nonexistent_properties(self): path = '/images?abc=xyz&pudding=banana' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'abc': 'xyz', 'pudding': 'banana'}) self.assertEqual(0, len(output['images'])) def test_index_with_non_existent_tags(self): path = '/images?tag=fake' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request, filters={'tags': ['fake']}) actual = [image.tags for image in output['images']] self.assertEqual(0, len(actual)) def test_show(self): request = unit_test_utils.get_fake_request() output = self.controller.show(request, image_id=UUID2) self.assertEqual(UUID2, output.image_id) self.assertEqual('2', output.name) def test_show_deleted_properties(self): """Ensure that the api filters out deleted image properties.""" # get the image properties into the odd state image = { 'id': str(uuid.uuid4()), 'status': 'active', 'properties': {'poo': 'bear'}, } self.db.image_create(None, image) self.db.image_update(None, image['id'], {'properties': {'yin': 'yang'}}, purge_props=True) request = unit_test_utils.get_fake_request() output = self.controller.show(request, image['id']) self.assertEqual('yang', output.extra_properties['yin']) def test_show_non_existent(self): request = unit_test_utils.get_fake_request() image_id = 
str(uuid.uuid4()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, image_id) def test_show_deleted_image_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID1) def test_show_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertEqual(TENANT1, request.context.tenant) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, request, UUID4) def test_create(self): request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual({}, output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('private', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual('image-1', output_log['payload']['name']) def test_create_disabled_notification(self): self.config(disabled_notifications=["image.create"]) request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual({}, output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('private', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) def test_create_with_properties(self): request = unit_test_utils.get_fake_request() image_properties = {'foo': 'bar'} image = {'name': 'image-1'} output = self.controller.create(request, image=image, extra_properties=image_properties, tags=[]) self.assertEqual('image-1', output.name) self.assertEqual(image_properties, 
output.extra_properties) self.assertEqual(set([]), output.tags) self.assertEqual('private', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual('image-1', output_log['payload']['name']) def test_create_with_too_many_properties(self): self.config(image_property_quota=1) request = unit_test_utils.get_fake_request() image_properties = {'foo': 'bar', 'foo2': 'bar'} image = {'name': 'image-1'} self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_with_bad_min_disk_size(self): request = unit_test_utils.get_fake_request() image = {'min_disk': -42, 'name': 'image-1'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_with_bad_min_ram_size(self): request = unit_test_utils.get_fake_request() image = {'min_ram': -42, 'name': 'image-1'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_public_image_as_admin(self): request = unit_test_utils.get_fake_request() image = {'name': 'image-1', 'visibility': 'public'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('public', output.visibility) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual(output.image_id, output_log['payload']['id']) def test_create_dup_id(self): request = unit_test_utils.get_fake_request() image = {'image_id': UUID4} self.assertRaises(webob.exc.HTTPConflict, self.controller.create, request, 
image=image, extra_properties={}, tags=[]) def test_create_duplicate_tags(self): request = unit_test_utils.get_fake_request() tags = ['ping', 'ping'] output = self.controller.create(request, image={}, extra_properties={}, tags=tags) self.assertEqual(set(['ping']), output.tags) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.create', output_log['event_type']) self.assertEqual(output.image_id, output_log['payload']['id']) def test_create_with_too_many_tags(self): self.config(image_tag_quota=1) request = unit_test_utils.get_fake_request() tags = ['ping', 'pong'] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.create, request, image={}, extra_properties={}, tags=tags) def test_create_with_owner_non_admin(self): request = unit_test_utils.get_fake_request() request.context.is_admin = False image = {'owner': '12345'} self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, extra_properties={}, tags=[]) request = unit_test_utils.get_fake_request() request.context.is_admin = False image = {'owner': TENANT1} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual(TENANT1, output.owner) def test_create_with_owner_admin(self): request = unit_test_utils.get_fake_request() request.context.is_admin = True image = {'owner': '12345'} output = self.controller.create(request, image=image, extra_properties={}, tags=[]) self.assertEqual('12345', output.owner) def test_create_with_duplicate_location(self): request = unit_test_utils.get_fake_request() location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} image = {'name': 'image-1', 'locations': [location, location]} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties={}, tags=[]) def test_create_unexpected_property(self): request = 
unit_test_utils.get_fake_request() image_properties = {'unexpected': 'unexpected'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=TypeError): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_reserved_property(self): request = unit_test_utils.get_fake_request() image_properties = {'reserved': 'reserved'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=exception.ReservedProperty( property='reserved')): self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_create_readonly_property(self): request = unit_test_utils.get_fake_request() image_properties = {'readonly': 'readonly'} image = {'name': 'image-1'} with mock.patch.object(domain.ImageFactory, 'new_image', side_effect=exception.ReadonlyProperty( property='readonly')): self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image=image, extra_properties=image_properties, tags=[]) def test_update_no_changes(self): request = unit_test_utils.get_fake_request() output = self.controller.update(request, UUID1, changes=[]) self.assertEqual(UUID1, output.image_id) self.assertEqual(output.created_at, output.updated_at) self.assertEqual(2, len(output.tags)) self.assertIn('ping', output.tags) self.assertIn('pong', output.tags) output_logs = self.notifier.get_logs() # NOTE(markwash): don't send a notification if nothing is updated self.assertEqual(0, len(output_logs)) def test_update_with_bad_min_disk(self): request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['min_disk'], 'value': -42}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes=changes) def test_update_with_bad_min_ram(self): request = unit_test_utils.get_fake_request() changes = [{'op': 
'replace', 'path': ['min_ram'], 'value': -42}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes=changes) def test_update_image_doesnt_exist(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, str(uuid.uuid4()), changes=[]) def test_update_deleted_image_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, request, UUID1, changes=[]) def test_update_with_too_many_properties(self): self.config(show_multiple_locations=True) self.config(user_storage_quota='1') new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes=changes) def test_update_replace_base_attribute(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() request.context.is_admin = True changes = [{'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'op': 'replace', 'path': ['owner'], 'value': TENANT3}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('fedora', output.name) self.assertEqual(TENANT3, output.owner) self.assertEqual({'foo': 'bar'}, output.extra_properties) self.assertNotEqual(output.created_at, output.updated_at) def test_update_replace_onwer_non_admin(self): request = unit_test_utils.get_fake_request() request.context.is_admin = False changes = [{'op': 'replace', 'path': ['owner'], 'value': TENANT3}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_replace_tags(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 
'replace', 'path': ['tags'], 'value': ['king', 'kong']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.tags)) self.assertIn('king', output.tags) self.assertIn('kong', output.tags) self.assertNotEqual(output.created_at, output.updated_at) def test_update_replace_property(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) changes = [ {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('baz', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_too_many_properties(self): self.config(image_property_quota=1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_and_remove_too_many_properties(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def 
test_update_add_unlimited_properties(self): self.config(image_property_quota=-1) request = unit_test_utils.get_fake_request() output = self.controller.show(request, UUID1) changes = [{'op': 'add', 'path': ['foo'], 'value': 'bar'}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertNotEqual(output.created_at, output.updated_at) def test_update_format_properties(self): statuses_for_immutability = ['active', 'saving', 'killed'] request = unit_test_utils.get_fake_request(is_admin=True) for status in statuses_for_immutability: image = { 'id': str(uuid.uuid4()), 'status': status, 'disk_format': 'ari', 'container_format': 'ari', } self.db.image_create(None, image) changes = [ {'op': 'replace', 'path': ['disk_format'], 'value': 'ami'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image['id'], changes) changes = [ {'op': 'replace', 'path': ['container_format'], 'value': 'ami'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, image['id'], changes) self.db.image_update(None, image['id'], {'status': 'queued'}) changes = [ {'op': 'replace', 'path': ['disk_format'], 'value': 'raw'}, {'op': 'replace', 'path': ['container_format'], 'value': 'bare'}, ] resp = self.controller.update(request, image['id'], changes) self.assertEqual('raw', resp.disk_format) self.assertEqual('bare', resp.container_format) def test_update_remove_property_while_over_limit(self): """Ensure that image properties can be removed. Image properties should be able to be removed as long as the image has fewer than the limited number of image properties after the transaction. 
""" request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'remove', 'path': ['snitch']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.extra_properties)) self.assertEqual('buzz', output.extra_properties['fizz']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_and_remove_property_under_limit(self): """Ensure that image properties can be removed. Image properties should be able to be added and removed simultaneously as long as the image has fewer than the limited number of image properties after the transaction. 
""" request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] self.controller.update(request, UUID1, changes) self.config(image_property_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['foo']}, {'op': 'remove', 'path': ['snitch']}, {'op': 'add', 'path': ['fizz'], 'value': 'buzz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.extra_properties)) self.assertEqual('buzz', output.extra_properties['fizz']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_replace_missing_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': 'foo', 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_prop_protection_with_create_and_permitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('bar', output.extra_properties['x_owner_foo']) def test_prop_protection_with_update_and_permitted_policy(self): self.set_property_protections(use_policies=True) enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) request = unit_test_utils.get_fake_request(roles=['spl_role']) 
image = {'name': 'image-1'} extra_props = {'spl_creator_policy': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) self.assertEqual('bar', created_image.extra_properties['spl_creator_policy']) another_request = unit_test_utils.get_fake_request(roles=['spl_role']) changes = [ {'op': 'replace', 'path': ['spl_creator_policy'], 'value': 'par'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) another_request = unit_test_utils.get_fake_request(roles=['admin']) output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('par', output.extra_properties['spl_creator_policy']) def test_prop_protection_with_create_with_patch_and_policy(self): self.set_property_protections(use_policies=True) enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) request = unit_test_utils.get_fake_request(roles=['spl_role', 'admin']) image = {'name': 'image-1'} extra_props = {'spl_default_policy': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'add', 'path': ['spl_creator_policy'], 'value': 'bar'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) another_request = unit_test_utils.get_fake_request(roles=['spl_role']) output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('bar', output.extra_properties['spl_creator_policy']) def test_prop_protection_with_create_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = 
unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) roles = ['fake_member'] another_request = unit_test_utils.get_fake_request(roles=roles) changes = [ {'op': 'add', 'path': ['x_owner_foo'], 'value': 'bar'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_prop_protection_with_show_and_permitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) output = self.controller.show(another_request, created_image.image_id) self.assertEqual('bar', output.extra_properties['x_owner_foo']) def test_prop_protection_with_show_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['member']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) output = self.controller.show(another_request, created_image.image_id) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_owner_foo') def test_prop_protection_with_update_and_permitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) 
self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('baz', output.extra_properties['x_owner_foo']) def test_prop_protection_with_update_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'replace', 'path': ['x_owner_foo'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_prop_protection_with_delete_and_permitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_owner_foo']} ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, 
output.extra_properties.__getitem__, 'x_owner_foo') def test_prop_protection_with_delete_and_unpermitted_role(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_owner_foo': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'remove', 'path': ['x_owner_foo']} ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_create_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'add', 'path': ['x_case_insensitive'], 'value': '1'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('1', output.extra_properties['x_case_insensitive']) def test_read_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) output = 
self.controller.show(another_request, created_image.image_id) self.assertEqual('1', output.extra_properties['x_case_insensitive']) def test_update_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'replace', 'path': ['x_case_insensitive'], 'value': '2'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('2', output.extra_properties['x_case_insensitive']) def test_delete_protected_prop_case_insensitive(self): enforcer = glance.api.policy.Enforcer() self.controller = glance.api.v2.images.ImagesController(self.db, enforcer, self.notifier, self.store) self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_case_insensitive': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_case_insensitive']} ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_case_insensitive') def test_create_non_protected_prop(self): """Property marked with special char @ creatable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted_1': '1'} created_image = self.controller.create(request, image=image, 
extra_properties=extra_props, tags=[]) self.assertEqual('1', created_image.extra_properties['x_all_permitted_1']) another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) extra_props = {'x_all_permitted_2': '2'} created_image = self.controller.create(another_request, image=image, extra_properties=extra_props, tags=[]) self.assertEqual('2', created_image.extra_properties['x_all_permitted_2']) def test_read_non_protected_prop(self): """Property marked with special char @ readable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': '1'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) output = self.controller.show(another_request, created_image.image_id) self.assertEqual('1', output.extra_properties['x_all_permitted']) def test_update_non_protected_prop(self): """Property marked with special char @ updatable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['joe_soap']) changes = [ {'op': 'replace', 'path': ['x_all_permitted'], 'value': 'baz'}, ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertEqual('baz', output.extra_properties['x_all_permitted']) def test_delete_non_protected_prop(self): """Property marked with special char @ deletable by an unknown role""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_all_permitted': 'bar'} created_image = self.controller.create(request, image=image, 
extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['member']) changes = [ {'op': 'remove', 'path': ['x_all_permitted']} ] output = self.controller.update(another_request, created_image.image_id, changes) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_all_permitted') def test_create_locked_down_protected_prop(self): """Property marked with special char ! creatable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} created_image = self.controller.create(request, image=image, extra_properties={}, tags=[]) roles = ['fake_member'] another_request = unit_test_utils.get_fake_request(roles=roles) changes = [ {'op': 'add', 'path': ['x_none_permitted'], 'value': 'bar'}, ] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, another_request, created_image.image_id, changes) def test_read_locked_down_protected_prop(self): """Property marked with special char ! readable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['member']) image = {'name': 'image-1'} extra_props = {'x_none_read': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) output = self.controller.show(another_request, created_image.image_id) self.assertRaises(KeyError, output.extra_properties.__getitem__, 'x_none_read') def test_update_locked_down_protected_prop(self): """Property marked with special char ! 
updatable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_none_update': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'replace', 'path': ['x_none_update'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_delete_locked_down_protected_prop(self): """Property marked with special char ! deletable by no one""" self.set_property_protections() request = unit_test_utils.get_fake_request(roles=['admin']) image = {'name': 'image-1'} extra_props = {'x_none_delete': 'bar'} created_image = self.controller.create(request, image=image, extra_properties=extra_props, tags=[]) another_request = unit_test_utils.get_fake_request(roles=['fake_role']) changes = [ {'op': 'remove', 'path': ['x_none_delete']} ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, another_request, created_image.image_id, changes) def test_update_replace_locations_non_empty(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location]}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_replace_locations_metadata_update(self): self.config(show_multiple_locations=True) location = {'url': '%s/%s' % (BASE_URI, UUID1), 'metadata': {'a': 1}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [location]}] output = self.controller.update(request, UUID1, changes) self.assertEqual({'a': 1}, output.locations[0]['metadata']) def 
test_locations_actions_with_locations_invisible(self): self.config(show_multiple_locations=False) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': [new_location]}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_replace_locations_invalid(self): request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_add_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['foo'], 'value': 'baz'}, {'op': 'add', 'path': ['snitch'], 'value': 'golden'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('baz', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_base_property_json_schema_version_4(self): request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 4, 'op': 'add', 'path': ['name'], 'value': 'fedora' }] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_extra_property_json_schema_version_4(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 4, 'op': 'add', 'path': ['foo'], 'value': 'baz' }] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_base_property_json_schema_version_10(self): request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 10, 'op': 'add', 'path': ['name'], 'value': 'fedora' }] output = self.controller.update(request, UUID1, 
changes) self.assertEqual(UUID1, output.image_id) self.assertEqual('fedora', output.name) def test_update_add_extra_property_json_schema_version_10(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{ 'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'baz' }] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'baz'}, output.extra_properties) def test_update_add_property_already_present_json_schema_version_4(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) changes = [ {'json_schema_version': 4, 'op': 'add', 'path': ['foo'], 'value': 'baz'}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_add_property_already_present_json_schema_version_10(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) changes = [ {'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'baz'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'baz'}, output.extra_properties) def test_update_add_locations(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, 
output.locations[1]) def test_update_add_locations_insertion(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '0'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, output.locations[0]) def test_update_add_locations_list(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_locations_invalid(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'unknow://foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'add', 'path': ['locations', None], 'value': {'url': 'unknow://foo', 'metadata': {}}}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_duplicate_locations(self): self.config(show_multiple_locations=True) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertEqual(new_location, output.locations[1]) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_add_too_many_locations(self): self.config(show_multiple_locations=True) 
self.config(image_location_quota=1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_and_remove_too_many_locations(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=1) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_3' % BASE_URI, 'metadata': {}}}, ] self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, request, UUID1, changes) def test_update_add_unlimited_locations(self): self.config(show_multiple_locations=True) self.config(image_location_quota=-1) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertNotEqual(output.created_at, output.updated_at) def test_update_remove_location_while_over_limit(self): """Ensure that image locations can be removed. Image locations should be able to be removed as long as the image has fewer than the limited number of image locations after the transaction. 
""" self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=1) self.config(show_multiple_locations=True) # We must remove two locations to avoid being over # the limit of 1 location changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'remove', 'path': ['locations', '0']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.locations)) self.assertIn('fake_location_2', output.locations[0]['url']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_add_and_remove_location_under_limit(self): """Ensure that image locations can be removed. Image locations should be able to be added and removed simultaneously as long as the image has fewer than the limited number of image locations after the transaction. 
""" self.stubs.Set(store, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_1' % BASE_URI, 'metadata': {}}}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_2' % BASE_URI, 'metadata': {}}}, ] self.controller.update(request, UUID1, changes) self.config(image_location_quota=2) # We must remove two properties to avoid being # over the limit of 1 property changes = [ {'op': 'remove', 'path': ['locations', '0']}, {'op': 'remove', 'path': ['locations', '0']}, {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location_3' % BASE_URI, 'metadata': {}}}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(2, len(output.locations)) self.assertIn('fake_location_3', output.locations[1]['url']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_remove_base_property(self): self.db.image_update(None, UUID1, {'properties': {'foo': 'bar'}}) request = unit_test_utils.get_fake_request() changes = [{'op': 'remove', 'path': ['name']}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_remove_property(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) output = self.controller.show(request, UUID1) self.assertEqual('bar', output.extra_properties['foo']) self.assertEqual('golden', output.extra_properties['snitch']) changes = [ {'op': 'remove', 'path': ['snitch']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual({'foo': 'bar'}, output.extra_properties) self.assertNotEqual(output.created_at, output.updated_at) def 
test_update_remove_missing_property(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'remove', 'path': ['foo']}, ] self.assertRaises(webob.exc.HTTPConflict, self.controller.update, request, UUID1, changes) def test_update_remove_location(self): self.config(show_multiple_locations=True) self.stubs.Set(store, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) request = unit_test_utils.get_fake_request() new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '0']}] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(1, len(output.locations)) self.assertEqual('active', output.status) def test_update_remove_location_invalid_pos(self): self.config(show_multiple_locations=True) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location' % BASE_URI, 'metadata': {}}}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', None]}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '-1']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '99']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', 'x']}] self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, request, UUID1, changes) def test_update_remove_location_store_exception(self): self.config(show_multiple_locations=True) def fake_delete_image_location_from_backend(self, *args, **kwargs): raise Exception('fake_backend_exception') 
self.stubs.Set(self.store_utils, 'delete_image_location_from_backend', fake_delete_image_location_from_backend) request = unit_test_utils.get_fake_request() changes = [ {'op': 'add', 'path': ['locations', '-'], 'value': {'url': '%s/fake_location' % BASE_URI, 'metadata': {}}}] self.controller.update(request, UUID1, changes) changes = [{'op': 'remove', 'path': ['locations', '0']}] self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, request, UUID1, changes) def test_update_multiple_changes(self): request = unit_test_utils.get_fake_request() properties = {'foo': 'bar', 'snitch': 'golden'} self.db.image_update(None, UUID1, {'properties': properties}) changes = [ {'op': 'replace', 'path': ['min_ram'], 'value': 128}, {'op': 'replace', 'path': ['foo'], 'value': 'baz'}, {'op': 'remove', 'path': ['snitch']}, {'op': 'add', 'path': ['kb'], 'value': 'dvorak'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(UUID1, output.image_id) self.assertEqual(128, output.min_ram) self.addDetail('extra_properties', testtools.content.json_content( jsonutils.dumps(output.extra_properties))) self.assertEqual(2, len(output.extra_properties)) self.assertEqual('baz', output.extra_properties['foo']) self.assertEqual('dvorak', output.extra_properties['kb']) self.assertNotEqual(output.created_at, output.updated_at) def test_update_invalid_operation(self): request = unit_test_utils.get_fake_request() change = {'op': 'test', 'path': 'options', 'value': 'puts'} try: self.controller.update(request, UUID1, [change]) except AttributeError: pass # AttributeError is the desired behavior else: self.fail('Failed to raise AssertionError on %s' % change) def test_update_duplicate_tags(self): request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': ['tags'], 'value': ['ping', 'ping']}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual(1, len(output.tags)) self.assertIn('ping', output.tags) output_logs = 
self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('image.update', output_log['event_type']) self.assertEqual(UUID1, output_log['payload']['id']) def test_update_disabled_notification(self): self.config(disabled_notifications=["image.update"]) request = unit_test_utils.get_fake_request() changes = [ {'op': 'replace', 'path': ['name'], 'value': 'Ping Pong'}, ] output = self.controller.update(request, UUID1, changes) self.assertEqual('Ping Pong', output.name) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) def test_delete(self): request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) try: self.controller.delete(request, UUID1) output_logs = self.notifier.get_logs() self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual("image.delete", output_log['event_type']) except Exception as e: self.fail("Delete raised exception: %s" % e) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_disabled_notification(self): self.config(disabled_notifications=["image.delete"]) request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) try: self.controller.delete(request, UUID1) output_logs = self.notifier.get_logs() self.assertEqual(0, len(output_logs)) except Exception as e: self.fail("Delete raised exception: %s" % e) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def 
test_delete_queued_updates_status(self): """Ensure status of queued image is updated (LP bug #1048851)""" request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'queued'}) image_id = image['id'] self.controller.delete(request, image_id) image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_queued_updates_status_delayed_delete(self): """Ensure status of queued image is updated (LP bug #1048851). Must be set to 'deleted' when delayed_delete isenabled. """ self.config(delayed_delete=True) request = unit_test_utils.get_fake_request(is_admin=True) image = self.db.image_create(request.context, {'status': 'queued'}) image_id = image['id'] self.controller.delete(request, image_id) image = self.db.image_get(request.context, image_id, force_show_deleted=True) self.assertTrue(image['deleted']) self.assertEqual('deleted', image['status']) def test_delete_not_in_store(self): request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) for k in self.store.data: if UUID1 in k: del self.store.data[k] break self.controller.delete(request, UUID1) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('deleted', deleted_img['status']) self.assertNotIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delayed_delete(self): self.config(delayed_delete=True) request = unit_test_utils.get_fake_request() self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) self.controller.delete(request, UUID1) deleted_img = self.db.image_get(request.context, UUID1, force_show_deleted=True) self.assertTrue(deleted_img['deleted']) self.assertEqual('pending_delete', deleted_img['status']) self.assertIn('%s/%s' % (BASE_URI, UUID1), self.store.data) def test_delete_non_existent(self): request = 
unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, str(uuid.uuid4())) def test_delete_already_deleted_image_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) self.controller.delete(request, UUID1) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, UUID1) def test_delete_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, request, UUID4) def test_delete_in_use(self): def fake_safe_delete_from_backend(self, *args, **kwargs): raise store.exceptions.InUseByStore() self.stubs.Set(self.store_utils, 'safe_delete_from_backend', fake_safe_delete_from_backend) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, request, UUID1) def test_delete_has_snapshot(self): def fake_safe_delete_from_backend(self, *args, **kwargs): raise store.exceptions.HasSnapshot() self.stubs.Set(self.store_utils, 'safe_delete_from_backend', fake_safe_delete_from_backend) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, request, UUID1) def test_delete_to_unallowed_status(self): # from deactivated to pending-delete self.config(delayed_delete=True) request = unit_test_utils.get_fake_request(is_admin=True) self.action_controller.deactivate(request, UUID1) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, request, UUID1) def test_index_with_invalid_marker(self): fake_uuid = str(uuid.uuid4()) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_invalid_locations_op_pos(self): pos = self.controller._get_locations_op_pos(None, 2, True) self.assertIsNone(pos) pos = self.controller._get_locations_op_pos('1', None, True) self.assertIsNone(pos) class TestImagesControllerPolicies(base.IsolatedUnitTest): 
def setUp(self): super(TestImagesControllerPolicies, self).setUp() self.db = unit_test_utils.FakeDB() self.policy = unit_test_utils.FakePolicyEnforcer() self.controller = glance.api.v2.images.ImagesController(self.db, self.policy) store = unit_test_utils.FakeStoreAPI() self.store_utils = unit_test_utils.FakeStoreUtils(store) def test_index_unauthorized(self): rules = {"get_images": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request) def test_show_unauthorized(self): rules = {"get_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, request, image_id=UUID2) def test_create_image_unauthorized(self): rules = {"add_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() image = {'name': 'image-1'} extra_properties = {} tags = [] self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image, extra_properties, tags) def test_create_public_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() image = {'name': 'image-1', 'visibility': 'public'} extra_properties = {} tags = [] self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, image, extra_properties, tags) def test_update_unauthorized(self): rules = {"modify_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['name'], 'value': 'image-2'}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_publicize_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'public'}] 
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_depublicize_image_unauthorized(self): rules = {"publicize_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['visibility'], 'value': 'private'}] output = self.controller.update(request, UUID1, changes) self.assertEqual('private', output.visibility) def test_update_get_image_location_unauthorized(self): rules = {"get_image_location": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_set_image_location_unauthorized(self): def fake_delete_image_location_from_backend(self, *args, **kwargs): pass rules = {"set_image_location": False} self.policy.set_rules(rules) new_location = {'url': '%s/fake_location' % BASE_URI, 'metadata': {}} request = unit_test_utils.get_fake_request() changes = [{'op': 'add', 'path': ['locations', '-'], 'value': new_location}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_update_delete_image_location_unauthorized(self): rules = {"delete_image_location": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() changes = [{'op': 'replace', 'path': ['locations'], 'value': []}] self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, request, UUID1, changes) def test_delete_unauthorized(self): rules = {"delete_image": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, request, UUID1) class TestImagesDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializer, self).setUp() self.deserializer = glance.api.v2.images.RequestDeserializer() def 
test_create_minimal(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({}) output = self.deserializer.create(request) expected = {'image': {}, 'extra_properties': {}, 'tags': []} self.assertEqual(expected, output) def test_create_invalid_id(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'id': 'gabe'}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_id_to_image_id(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'id': UUID4}) output = self.deserializer.create(request) expected = {'image': {'image_id': UUID4}, 'extra_properties': {}, 'tags': []} self.assertEqual(expected, output) def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create_full(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'id': UUID3, 'name': 'image-1', 'visibility': 'public', 'tags': ['one', 'two'], 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'foo': 'bar', 'protected': True, }) output = self.deserializer.create(request) properties = { 'image_id': UUID3, 'name': 'image-1', 'visibility': 'public', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'protected': True, } self.maxDiff = None expected = {'image': properties, 'extra_properties': {'foo': 'bar'}, 'tags': ['one', 'two']} self.assertEqual(expected, output) def test_create_readonly_attributes_forbidden(self): bodies = [ {'direct_url': 'http://example.com'}, {'self': 'http://example.com'}, {'file': 'http://example.com'}, {'schema': 'http://example.com'}, ] for body in bodies: request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPForbidden, self.deserializer.create, request) def 
_get_fake_patch_request(self, content_type_minor_version=1): request = unit_test_utils.get_fake_request() template = 'application/openstack-images-v2.%d-json-patch' request.content_type = template % content_type_minor_version return request def test_update_empty_body(self): request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes([]) output = self.deserializer.update(request) expected = {'changes': []} self.assertEqual(expected, output) def test_update_unsupported_content_type(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/json-patch' request.body = jsonutils.dump_as_bytes([]) try: self.deserializer.update(request) except webob.exc.HTTPUnsupportedMediaType as e: # desired result, but must have correct Accept-Patch header accept_patch = ['application/openstack-images-v2.1-json-patch', 'application/openstack-images-v2.0-json-patch'] expected = ', '.join(sorted(accept_patch)) self.assertEqual(expected, e.headers['Accept-Patch']) else: self.fail('Did not raise HTTPUnsupportedMediaType') def test_update_body_not_a_list(self): bodies = [ {'op': 'add', 'path': '/someprop', 'value': 'somevalue'}, 'just some string', 123, True, False, None, ] for body in bodies: request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_invalid_changes(self): changes = [ ['a', 'list', 'of', 'stuff'], 'just some string', 123, True, False, None, {'op': 'invalid', 'path': '/name', 'value': 'fedora'} ] for change in changes: request = self._get_fake_patch_request() request.body = jsonutils.dump_as_bytes([change]) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update(self): request = self._get_fake_patch_request() body = [ {'op': 'replace', 'path': '/name', 'value': 'fedora'}, {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, {'op': 'replace', 'path': '/foo', 
'value': 'bar'}, {'op': 'add', 'path': '/bebim', 'value': 'bap'}, {'op': 'remove', 'path': '/sparks'}, {'op': 'add', 'path': '/locations/-', 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'op': 'add', 'path': '/locations/10', 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'op': 'remove', 'path': '/locations/2'}, {'op': 'replace', 'path': '/locations', 'value': []}, {'op': 'replace', 'path': '/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['foo'], 'value': 'bar'}, {'json_schema_version': 10, 'op': 'add', 'path': ['bebim'], 'value': 'bap'}, {'json_schema_version': 10, 'op': 'remove', 'path': ['sparks']}, {'json_schema_version': 10, 'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'json_schema_version': 10, 'op': 'add', 'path': ['locations', '10'], 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'json_schema_version': 10, 'op': 'remove', 'path': ['locations', '2']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ]} self.assertEqual(expected, output) def test_update_v2_0_compatibility(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [ {'replace': '/name', 'value': 'fedora'}, {'replace': '/tags', 'value': ['king', 'kong']}, {'replace': '/foo', 'value': 'bar'}, {'add': '/bebim', 'value': 'bap'}, {'remove': '/sparks'}, {'add': '/locations/-', 'value': {'url': 
'scheme3://path3', 'metadata': {}}}, {'add': '/locations/10', 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'remove': '/locations/2'}, {'replace': '/locations', 'value': []}, {'replace': '/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 4, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 4, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 4, 'op': 'replace', 'path': ['foo'], 'value': 'bar'}, {'json_schema_version': 4, 'op': 'add', 'path': ['bebim'], 'value': 'bap'}, {'json_schema_version': 4, 'op': 'remove', 'path': ['sparks']}, {'json_schema_version': 4, 'op': 'add', 'path': ['locations', '-'], 'value': {'url': 'scheme3://path3', 'metadata': {}}}, {'json_schema_version': 4, 'op': 'add', 'path': ['locations', '10'], 'value': {'url': 'scheme4://path4', 'metadata': {}}}, {'json_schema_version': 4, 'op': 'remove', 'path': ['locations', '2']}, {'json_schema_version': 4, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 4, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]}, ]} self.assertEqual(expected, output) def test_update_base_attributes(self): request = self._get_fake_patch_request() body = [ {'op': 'replace', 'path': '/name', 'value': 'fedora'}, {'op': 'replace', 'path': '/visibility', 'value': 'public'}, {'op': 'replace', 'path': '/tags', 'value': ['king', 'kong']}, {'op': 'replace', 'path': '/protected', 'value': True}, {'op': 'replace', 'path': '/container_format', 'value': 'bare'}, {'op': 'replace', 'path': '/disk_format', 'value': 'raw'}, {'op': 'replace', 'path': '/min_ram', 'value': 128}, {'op': 'replace', 'path': '/min_disk', 'value': 10}, {'op': 'replace', 'path': 
'/locations', 'value': []}, {'op': 'replace', 'path': '/locations', 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]} ] request.body = jsonutils.dump_as_bytes(body) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'replace', 'path': ['name'], 'value': 'fedora'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['visibility'], 'value': 'public'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['tags'], 'value': ['king', 'kong']}, {'json_schema_version': 10, 'op': 'replace', 'path': ['protected'], 'value': True}, {'json_schema_version': 10, 'op': 'replace', 'path': ['container_format'], 'value': 'bare'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['disk_format'], 'value': 'raw'}, {'json_schema_version': 10, 'op': 'replace', 'path': ['min_ram'], 'value': 128}, {'json_schema_version': 10, 'op': 'replace', 'path': ['min_disk'], 'value': 10}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': []}, {'json_schema_version': 10, 'op': 'replace', 'path': ['locations'], 'value': [{'url': 'scheme5://path5', 'metadata': {}}, {'url': 'scheme6://path6', 'metadata': {}}]} ]} self.assertEqual(expected, output) def test_update_disallowed_attributes(self): samples = { 'direct_url': '/a/b/c/d', 'self': '/e/f/g/h', 'file': '/e/f/g/h/file', 'schema': '/i/j/k', } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_readonly_attributes(self): samples = { 'id': '00000000-0000-0000-0000-000000000000', 'status': 'active', 'checksum': 'abcdefghijklmnopqrstuvwxyz012345', 'size': 9001, 'virtual_size': 9001, 'created_at': ISOTIME, 'updated_at': 
ISOTIME, } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_reserved_attributes(self): samples = { 'deleted': False, 'deleted_at': ISOTIME, } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPForbidden: pass # desired behavior else: self.fail("Updating %s did not result in HTTPForbidden" % key) def test_update_invalid_attributes(self): keys = [ 'noslash', '///twoslash', '/two/ /slash', '/ / ', '/trailingslash/', '/lone~tilde', '/trailingtilde~' ] for key in keys: request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '%s' % key, 'value': 'dummy'}] request.body = jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPBadRequest: pass # desired behavior else: self.fail("Updating %s did not result in HTTPBadRequest" % key) def test_update_pointer_encoding(self): samples = { '/keywith~1slash': [u'keywith/slash'], '/keywith~0tilde': [u'keywith~tilde'], '/tricky~01': [u'tricky~1'], } for encoded, decoded in samples.items(): request = self._get_fake_patch_request() doc = [{'op': 'replace', 'path': '%s' % encoded, 'value': 'dummy'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) self.assertEqual(decoded, output['changes'][0]['path']) def test_update_deep_limited_attributes(self): samples = { 'locations/1/2': [], } for key, value in samples.items(): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/%s' % key, 'value': value}] request.body = 
jsonutils.dump_as_bytes(body) try: self.deserializer.update(request) except webob.exc.HTTPBadRequest: pass # desired behavior else: self.fail("Updating %s did not result in HTTPBadRequest" % key) def test_update_v2_1_missing_operations(self): request = self._get_fake_patch_request() body = [{'path': '/colburn', 'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_1_missing_value(self): request = self._get_fake_patch_request() body = [{'op': 'replace', 'path': '/colburn'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_1_missing_path(self): request = self._get_fake_patch_request() body = [{'op': 'replace', 'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_multiple_operations(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'replace': '/foo', 'add': '/bar', 'value': 'snore'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_missing_operations(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'value': 'arcata'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update_v2_0_missing_value(self): request = self._get_fake_patch_request(content_type_minor_version=0) body = [{'replace': '/colburn'}] request.body = jsonutils.dump_as_bytes(body) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_index(self): marker = str(uuid.uuid4()) path = '/images?limit=1&marker=%s&member_status=pending' % marker request = unit_test_utils.get_fake_request(path) expected = {'limit': 1, 
'marker': marker, 'sort_key': ['created_at'], 'sort_dir': ['desc'], 'member_status': 'pending', 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_with_filter(self): name = 'My Little Image' path = '/images?name=%s' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) def test_index_strip_params_from_filters(self): name = 'My Little Image' path = '/images?name=%s' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(1, len(output['filters'])) def test_index_with_many_filter(self): name = 'My Little Image' instance_id = str(uuid.uuid4()) path = ('/images?name=%(name)s&id=%(instance_id)s' % {'name': name, 'instance_id': instance_id}) request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(instance_id, output['filters']['id']) def test_index_with_filter_and_limit(self): name = 'My Little Image' path = '/images?name=%s&limit=1' % name request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(name, output['filters']['name']) self.assertEqual(1, output['limit']) def test_index_non_integer_limit(self): request = unit_test_utils.get_fake_request('/images?limit=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_zero_limit(self): request = unit_test_utils.get_fake_request('/images?limit=0') expected = {'limit': 0, 'sort_key': ['created_at'], 'member_status': 'accepted', 'sort_dir': ['desc'], 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_negative_limit(self): request = unit_test_utils.get_fake_request('/images?limit=-1') 
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_fraction(self): request = unit_test_utils.get_fake_request('/images?limit=1.1') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_invalid_status(self): path = '/images?member_status=blah' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_marker(self): marker = str(uuid.uuid4()) path = '/images?marker=%s' % marker request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(marker, output.get('marker')) def test_index_marker_not_specified(self): request = unit_test_utils.get_fake_request('/images') output = self.deserializer.index(request) self.assertNotIn('marker', output) def test_index_limit_not_specified(self): request = unit_test_utils.get_fake_request('/images') output = self.deserializer.index(request) self.assertNotIn('limit', output) def test_index_sort_key_id(self): request = unit_test_utils.get_fake_request('/images?sort_key=id') output = self.deserializer.index(request) expected = { 'sort_key': ['id'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {} } self.assertEqual(expected, output) def test_index_multiple_sort_keys(self): request = unit_test_utils.get_fake_request('/images?' 'sort_key=name&' 'sort_key=size') output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {} } self.assertEqual(expected, output) def test_index_invalid_multiple_sort_keys(self): # blah is an invalid sort key request = unit_test_utils.get_fake_request('/images?' 
'sort_key=name&' 'sort_key=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_dir_asc(self): request = unit_test_utils.get_fake_request('/images?sort_dir=asc') output = self.deserializer.index(request) expected = { 'sort_key': ['created_at'], 'sort_dir': ['asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_multiple_sort_dirs(self): req_string = ('/images?sort_key=name&sort_dir=asc&' 'sort_key=id&sort_dir=desc') request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id'], 'sort_dir': ['asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_single_key_default_dir(self): req_string = '/images?sort=name' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_single_key_desc_dir(self): req_string = '/images?sort=name:desc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name'], 'sort_dir': ['desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_default_dir(self): req_string = '/images?sort=name,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_asc_dir(self): req_string = '/images?sort=name:asc,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 
'sort_key': ['name', 'size'], 'sort_dir': ['asc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_different_dirs(self): req_string = '/images?sort=name:desc,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_new_sorting_syntax_multiple_keys_optional_dir(self): req_string = '/images?sort=name:asc,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'size'], 'sort_dir': ['desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name,id:asc,size' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id', 'size'], 'sort_dir': ['desc', 'asc', 'desc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) req_string = '/images?sort=name:asc,id,size:asc' request = unit_test_utils.get_fake_request(req_string) output = self.deserializer.index(request) expected = { 'sort_key': ['name', 'id', 'size'], 'sort_dir': ['asc', 'desc', 'asc'], 'member_status': 'accepted', 'filters': {}} self.assertEqual(expected, output) def test_index_sort_wrong_sort_dirs_number(self): req_string = '/images?sort_key=name&sort_dir=asc&sort_dir=desc' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, 
self.deserializer.index, request) def test_index_sort_dirs_fewer_than_keys(self): req_string = ('/images?sort_key=name&sort_dir=asc&sort_key=id&' 'sort_dir=asc&sort_key=created_at') request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_wrong_sort_dirs_number_without_key(self): req_string = '/images?sort_dir=asc&sort_dir=desc' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_private_key(self): request = unit_test_utils.get_fake_request('/images?sort_key=min_ram') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_key_invalid_value(self): # blah is an invalid sort key request = unit_test_utils.get_fake_request('/images?sort_key=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_sort_dir_invalid_value(self): # foo is an invalid sort dir request = unit_test_utils.get_fake_request('/images?sort_dir=foo') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_new_sorting_syntax_invalid_request(self): # 'blah' is not a supported sorting key req_string = '/images?sort=blah' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) req_string = '/images?sort=name,blah' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) # 'foo' isn't a valid sort direction req_string = '/images?sort=name:foo' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) # 'asc:desc' isn't a valid sort direction req_string = '/images?sort=name:asc:desc' request = unit_test_utils.get_fake_request(req_string) 
self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_combined_sorting_syntax(self): req_string = '/images?sort_dir=name&sort=name' request = unit_test_utils.get_fake_request(req_string) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_with_tag(self): path = '/images?tag=%s&tag=%s' % ('x86', '64bit') request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(sorted(['x86', '64bit']), sorted(output['filters']['tags'])) class TestImagesDeserializerWithExtendedSchema(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializerWithExtendedSchema, self).setUp() self.config(allow_additional_image_properties=False) custom_image_properties = { 'pants': { 'type': 'string', 'enum': ['on', 'off'], }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.deserializer = glance.api.v2.images.RequestDeserializer(schema) def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'name': 'image-1', 'pants': 'on' }) output = self.deserializer.create(request) expected = { 'image': {'name': 'image-1'}, 'extra_properties': {'pants': 'on'}, 'tags': [], } self.assertEqual(expected, output) def test_create_bad_data(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'name': 'image-1', 'pants': 'borked' }) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/pants', 'value': 'off'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) expected = {'changes': [ {'json_schema_version': 10, 'op': 'add', 'path': ['pants'], 'value': 'off'}, ]} self.assertEqual(expected, output) def test_update_bad_data(self): request = 
unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/pants', 'value': 'cutoffs'}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) class TestImagesDeserializerWithAdditionalProperties(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializerWithAdditionalProperties, self).setUp() self.config(allow_additional_image_properties=True) self.deserializer = glance.api.v2.images.RequestDeserializer() def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'foo': 'bar'}) output = self.deserializer.create(request) expected = {'image': {}, 'extra_properties': {'foo': 'bar'}, 'tags': []} self.assertEqual(expected, output) def test_create_with_numeric_property(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'abc': 123}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update_with_numeric_property(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': 123}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_create_with_list_property(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'foo': ['bar']}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update_with_list_property(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': ['bar', 'baz']}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) def test_update(self): request = 
unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] request.body = jsonutils.dump_as_bytes(doc) output = self.deserializer.update(request) change = { 'json_schema_version': 10, 'op': 'add', 'path': ['foo'], 'value': 'bar' } self.assertEqual({'changes': [change]}, output) class TestImagesDeserializerNoAdditionalProperties(test_utils.BaseTestCase): def setUp(self): super(TestImagesDeserializerNoAdditionalProperties, self).setUp() self.config(allow_additional_image_properties=False) self.deserializer = glance.api.v2.images.RequestDeserializer() def test_create_with_additional_properties_disallowed(self): self.config(allow_additional_image_properties=False) request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({'foo': 'bar'}) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_update(self): request = unit_test_utils.get_fake_request() request.content_type = 'application/openstack-images-v2.1-json-patch' doc = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] request.body = jsonutils.dump_as_bytes(doc) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.update, request) class TestImagesSerializer(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializer, self).setUp() self.serializer = glance.api.v2.images.ResponseSerializer() self.fixtures = [ # NOTE(bcwaldon): This first fixture has every property defined _domain_fixture(UUID1, name='image-1', size=1024, virtual_size=3072, created_at=DATETIME, updated_at=DATETIME, owner=TENANT1, visibility='public', container_format='ami', tags=['one', 'two'], disk_format='ami', min_ram=128, min_disk=10, checksum='ca425b88f047ce8ec45ee90e813ada91'), # NOTE(bcwaldon): This second fixture depends on default behavior # and sets most values to None _domain_fixture(UUID2, created_at=DATETIME, updated_at=DATETIME), ] def test_index(self): expected = { 
'images': [ { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'tags': set(['one', 'two']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', }, { 'id': UUID2, 'status': 'queued', 'visibility': 'private', 'protected': False, 'tags': set([]), 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'size': None, 'name': None, 'owner': None, 'min_ram': None, 'min_disk': None, 'checksum': None, 'disk_format': None, 'virtual_size': None, 'container_format': None, }, ], 'first': '/v2/images', 'schema': '/v2/schemas/images', } request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {'images': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) for image in actual['images']: image['tags'] = set(image['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_index_next_marker(self): request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {'images': self.fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) self.assertEqual('/v2/images?marker=%s' % UUID2, output['next']) def test_index_carries_query_parameters(self): url = '/v2/images?limit=10&sort_key=id&sort_dir=asc' request = webob.Request.blank(url) response = webob.Response(request=request) result = {'images': self.fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) expected_url = 
'/v2/images?limit=10&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url), unit_test_utils.sort_url_by_qs_keys(output['first'])) expect_next = '/v2/images?limit=10&marker=%s&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys( expect_next % UUID2), unit_test_utils.sort_url_by_qs_keys(output['next'])) def test_index_forbidden_get_image_location(self): """Make sure the serializer works fine. No mater if current user is authorized to get image location if the show_multiple_locations is False. """ class ImageLocations(object): def __len__(self): raise exception.Forbidden() self.config(show_multiple_locations=False) self.config(show_image_direct_url=False) url = '/v2/images?limit=10&sort_key=id&sort_dir=asc' request = webob.Request.blank(url) response = webob.Response(request=request) result = {'images': self.fixtures} self.assertEqual(200, response.status_int) # The image index should work though the user is forbidden result['images'][0].locations = ImageLocations() self.serializer.index(response, result) self.assertEqual(200, response.status_int) def test_show_full_fixture(self): expected = { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'tags': set(['one', 'two']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.show(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_show_minimal_fixture(self): expected = { 'id': UUID2, 'status': 'queued', 
'visibility': 'private', 'protected': False, 'tags': [], 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'size': None, 'name': None, 'owner': None, 'min_ram': None, 'min_disk': None, 'checksum': None, 'disk_format': None, 'virtual_size': None, 'container_format': None, } response = webob.Response() self.serializer.show(response, self.fixtures[1]) self.assertEqual(expected, jsonutils.loads(response.body)) def test_create(self): expected = { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'tags': ['one', 'two'], 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(201, response.status_int) actual = jsonutils.loads(response.body) actual['tags'] = sorted(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) self.assertEqual('/v2/images/%s' % UUID1, response.location) def test_update(self): expected = { 'id': UUID1, 'name': 'image-1', 'status': 'queued', 'visibility': 'public', 'protected': False, 'tags': set(['one', 'two']), 'size': 1024, 'virtual_size': 3072, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID1, 'file': '/v2/images/%s/file' % UUID1, 'schema': '/v2/schemas/image', 'owner': '6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.update(response, self.fixtures[0]) actual = 
jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) class TestImagesSerializerWithUnicode(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithUnicode, self).setUp() self.serializer = glance.api.v2.images.ResponseSerializer() self.fixtures = [ # NOTE(bcwaldon): This first fixture has every property defined _domain_fixture(UUID1, **{ 'name': u'OpenStack\u2122-1', 'size': 1024, 'virtual_size': 3072, 'tags': [u'\u2160', u'\u2161'], 'created_at': DATETIME, 'updated_at': DATETIME, 'owner': TENANT1, 'visibility': 'public', 'container_format': 'ami', 'disk_format': 'ami', 'min_ram': 128, 'min_disk': 10, 'checksum': u'ca425b88f047ce8ec45ee90e813ada91', 'extra_properties': {'lang': u'Fran\u00E7ais', u'dispos\u00E9': u'f\u00E2ch\u00E9'}, }), ] def test_index(self): expected = { u'images': [ { u'id': UUID1, u'name': u'OpenStack\u2122-1', u'status': u'queued', u'visibility': u'public', u'protected': False, u'tags': [u'\u2160', u'\u2161'], u'size': 1024, u'virtual_size': 3072, u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', u'container_format': u'ami', u'disk_format': u'ami', u'min_ram': 128, u'min_disk': 10, u'created_at': six.text_type(ISOTIME), u'updated_at': six.text_type(ISOTIME), u'self': u'/v2/images/%s' % UUID1, u'file': u'/v2/images/%s/file' % UUID1, u'schema': u'/v2/schemas/image', u'lang': u'Fran\u00E7ais', u'dispos\u00E9': u'f\u00E2ch\u00E9', u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', }, ], u'first': u'/v2/images', u'schema': u'/v2/schemas/images', } request = webob.Request.blank('/v2/images') response = webob.Response(request=request) result = {u'images': self.fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) actual['images'][0]['tags'] = sorted(actual['images'][0]['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def 
test_show_full_fixture(self): expected = { u'id': UUID1, u'name': u'OpenStack\u2122-1', u'status': u'queued', u'visibility': u'public', u'protected': False, u'tags': set([u'\u2160', u'\u2161']), u'size': 1024, u'virtual_size': 3072, u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', u'container_format': u'ami', u'disk_format': u'ami', u'min_ram': 128, u'min_disk': 10, u'created_at': six.text_type(ISOTIME), u'updated_at': six.text_type(ISOTIME), u'self': u'/v2/images/%s' % UUID1, u'file': u'/v2/images/%s/file' % UUID1, u'schema': u'/v2/schemas/image', u'lang': u'Fran\u00E7ais', u'dispos\u00E9': u'f\u00E2ch\u00E9', u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.show(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_create(self): expected = { u'id': UUID1, u'name': u'OpenStack\u2122-1', u'status': u'queued', u'visibility': u'public', u'protected': False, u'tags': [u'\u2160', u'\u2161'], u'size': 1024, u'virtual_size': 3072, u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', u'container_format': u'ami', u'disk_format': u'ami', u'min_ram': 128, u'min_disk': 10, u'created_at': six.text_type(ISOTIME), u'updated_at': six.text_type(ISOTIME), u'self': u'/v2/images/%s' % UUID1, u'file': u'/v2/images/%s/file' % UUID1, u'schema': u'/v2/schemas/image', u'lang': u'Fran\u00E7ais', u'dispos\u00E9': u'f\u00E2ch\u00E9', u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.create(response, self.fixtures[0]) self.assertEqual(201, response.status_int) actual = jsonutils.loads(response.body) actual['tags'] = sorted(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) self.assertEqual('/v2/images/%s' % UUID1, response.location) def test_update(self): expected = { u'id': UUID1, u'name': 
u'OpenStack\u2122-1', u'status': u'queued', u'visibility': u'public', u'protected': False, u'tags': set([u'\u2160', u'\u2161']), u'size': 1024, u'virtual_size': 3072, u'checksum': u'ca425b88f047ce8ec45ee90e813ada91', u'container_format': u'ami', u'disk_format': u'ami', u'min_ram': 128, u'min_disk': 10, u'created_at': six.text_type(ISOTIME), u'updated_at': six.text_type(ISOTIME), u'self': u'/v2/images/%s' % UUID1, u'file': u'/v2/images/%s/file' % UUID1, u'schema': u'/v2/schemas/image', u'lang': u'Fran\u00E7ais', u'dispos\u00E9': u'f\u00E2ch\u00E9', u'owner': u'6838eb7b-6ded-434a-882c-b344c77fe8df', } response = webob.Response() self.serializer.update(response, self.fixtures[0]) actual = jsonutils.loads(response.body) actual['tags'] = set(actual['tags']) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) class TestImagesSerializerWithExtendedSchema(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithExtendedSchema, self).setUp() self.config(allow_additional_image_properties=False) custom_image_properties = { 'color': { 'type': 'string', 'enum': ['red', 'green'], }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.serializer = glance.api.v2.images.ResponseSerializer(schema) props = dict(color='green', mood='grouchy') self.fixture = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties=props) def test_show(self): expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'tags': [], 'size': 1024, 'virtual_size': 3072, 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'color': 'green', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'min_ram': None, 
'min_disk': None, 'disk_format': None, 'container_format': None, } response = webob.Response() self.serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) def test_show_reports_invalid_data(self): self.fixture.extra_properties['color'] = 'invalid' expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'tags': [], 'size': 1024, 'virtual_size': 3072, 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'color': 'invalid', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, } response = webob.Response() self.serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) class TestImagesSerializerWithAdditionalProperties(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerWithAdditionalProperties, self).setUp() self.config(allow_additional_image_properties=True) self.fixture = _domain_fixture( UUID2, name='image-2', owner=TENANT2, checksum='ca425b88f047ce8ec45ee90e813ada91', created_at=DATETIME, updated_at=DATETIME, size=1024, virtual_size=3072, extra_properties={'marx': 'groucho'}) def test_show(self): serializer = glance.api.v2.images.ResponseSerializer() expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'marx': 'groucho', 'tags': [], 'size': 1024, 'virtual_size': 3072, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, } response = webob.Response() 
serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) def test_show_invalid_additional_property(self): """Ensure that the serializer passes through invalid additional properties. It must not complains with i.e. non-string. """ serializer = glance.api.v2.images.ResponseSerializer() self.fixture.extra_properties['marx'] = 123 expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'marx': 123, 'tags': [], 'size': 1024, 'virtual_size': 3072, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, } response = webob.Response() serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) def test_show_with_additional_properties_disabled(self): self.config(allow_additional_image_properties=False) serializer = glance.api.v2.images.ResponseSerializer() expected = { 'id': UUID2, 'name': 'image-2', 'status': 'queued', 'visibility': 'private', 'protected': False, 'checksum': 'ca425b88f047ce8ec45ee90e813ada91', 'tags': [], 'size': 1024, 'virtual_size': 3072, 'owner': '2c014f32-55eb-467d-8fcb-4bd706012f81', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/images/%s' % UUID2, 'file': '/v2/images/%s/file' % UUID2, 'schema': '/v2/schemas/image', 'min_ram': None, 'min_disk': None, 'disk_format': None, 'container_format': None, } response = webob.Response() serializer.show(response, self.fixture) self.assertEqual(expected, jsonutils.loads(response.body)) class TestImagesSerializerDirectUrl(test_utils.BaseTestCase): def setUp(self): super(TestImagesSerializerDirectUrl, self).setUp() self.serializer = glance.api.v2.images.ResponseSerializer() self.active_image = 
_domain_fixture( UUID1, name='image-1', visibility='public', status='active', size=1024, virtual_size=3072, created_at=DATETIME, updated_at=DATETIME, locations=[{'id': '1', 'url': 'http://some/fake/location', 'metadata': {}, 'status': 'active'}]) self.queued_image = _domain_fixture( UUID2, name='image-2', status='active', created_at=DATETIME, updated_at=DATETIME, checksum='ca425b88f047ce8ec45ee90e813ada91') self.location_data_image_url = 'http://abc.com/somewhere' self.location_data_image_meta = {'key': 98231} self.location_data_image = _domain_fixture( UUID2, name='image-2', status='active', created_at=DATETIME, updated_at=DATETIME, locations=[{'id': '2', 'url': self.location_data_image_url, 'metadata': self.location_data_image_meta, 'status': 'active'}]) def _do_index(self): request = webob.Request.blank('/v2/images') response = webob.Response(request=request) self.serializer.index(response, {'images': [self.active_image, self.queued_image]}) return jsonutils.loads(response.body)['images'] def _do_show(self, image): request = webob.Request.blank('/v2/images') response = webob.Response(request=request) self.serializer.show(response, image) return jsonutils.loads(response.body) def test_index_store_location_enabled(self): self.config(show_image_direct_url=True) images = self._do_index() # NOTE(markwash): ordering sanity check self.assertEqual(UUID1, images[0]['id']) self.assertEqual(UUID2, images[1]['id']) self.assertEqual('http://some/fake/location', images[0]['direct_url']) self.assertNotIn('direct_url', images[1]) def test_index_store_multiple_location_enabled(self): self.config(show_multiple_locations=True) request = webob.Request.blank('/v2/images') response = webob.Response(request=request) self.serializer.index(response, {'images': [self.location_data_image]}), images = jsonutils.loads(response.body)['images'] location = images[0]['locations'][0] self.assertEqual(location['url'], self.location_data_image_url) self.assertEqual(location['metadata'], 
self.location_data_image_meta) def test_index_store_location_explicitly_disabled(self): self.config(show_image_direct_url=False) images = self._do_index() self.assertNotIn('direct_url', images[0]) self.assertNotIn('direct_url', images[1]) def test_show_location_enabled(self): self.config(show_image_direct_url=True) image = self._do_show(self.active_image) self.assertEqual('http://some/fake/location', image['direct_url']) def test_show_location_enabled_but_not_set(self): self.config(show_image_direct_url=True) image = self._do_show(self.queued_image) self.assertNotIn('direct_url', image) def test_show_location_explicitly_disabled(self): self.config(show_image_direct_url=False) image = self._do_show(self.active_image) self.assertNotIn('direct_url', image) class TestImageSchemaFormatConfiguration(test_utils.BaseTestCase): def test_default_disk_formats(self): schema = glance.api.v2.images.get_schema() expected = [None, 'ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] actual = schema.properties['disk_format']['enum'] self.assertEqual(expected, actual) def test_custom_disk_formats(self): self.config(disk_formats=['gabe'], group="image_format") schema = glance.api.v2.images.get_schema() expected = [None, 'gabe'] actual = schema.properties['disk_format']['enum'] self.assertEqual(expected, actual) def test_default_container_formats(self): schema = glance.api.v2.images.get_schema() expected = [None, 'ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'] actual = schema.properties['container_format']['enum'] self.assertEqual(expected, actual) def test_custom_container_formats(self): self.config(container_formats=['mark'], group="image_format") schema = glance.api.v2.images.get_schema() expected = [None, 'mark'] actual = schema.properties['container_format']['enum'] self.assertEqual(expected, actual) class TestImageSchemaDeterminePropertyBasis(test_utils.BaseTestCase): def test_custom_property_marked_as_non_base(self): 
self.config(allow_additional_image_properties=False) custom_image_properties = { 'pants': { 'type': 'string', }, } schema = glance.api.v2.images.get_schema(custom_image_properties) self.assertFalse(schema.properties['pants'].get('is_base', True)) def test_base_property_marked_as_base(self): schema = glance.api.v2.images.get_schema() self.assertTrue(schema.properties['disk_format'].get('is_base', True)) glance-12.0.0/glance/tests/unit/v2/test_tasks_resource.py0000664000567000056710000010236712701407047024545 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import webob import glance.api.v2.tasks from glance.common import timeutils import glance.domain import glance.gateway from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils import glance.tests.utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' UUID3 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7' UUID4 = '6bbe7cc2-eae7-4c0f-b50d-a7160b0c6a86' TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' DATETIME = datetime.datetime(2013, 9, 28, 15, 27, 36, 325355) ISOTIME = '2013-09-28T15:27:36Z' def _db_fixture(task_id, **kwargs): default_datetime = timeutils.utcnow() obj = { 'id': task_id, 'status': 'pending', 'type': 'import', 'input': {}, 'result': None, 'owner': None, 'message': None, 'expires_at': None, 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False } obj.update(kwargs) return obj def _domain_fixture(task_id, **kwargs): default_datetime = timeutils.utcnow() task_properties = { 'task_id': task_id, 'status': kwargs.get('status', 'pending'), 'task_type': kwargs.get('type', 'import'), 'owner': kwargs.get('owner', None), 'expires_at': kwargs.get('expires_at', None), 'created_at': kwargs.get('created_at', default_datetime), 'updated_at': kwargs.get('updated_at', default_datetime), 'task_input': kwargs.get('task_input', {}), 'message': kwargs.get('message', None), 'result': kwargs.get('result', None) } task = glance.domain.Task(**task_properties) return task CONF = cfg.CONF CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') class 
TestTasksController(test_utils.BaseTestCase): def setUp(self): super(TestTasksController, self).setUp() self.db = unit_test_utils.FakeDB(initialize=False) self.policy = unit_test_utils.FakePolicyEnforcer() self.notifier = unit_test_utils.FakeNotifier() self.store = unit_test_utils.FakeStoreAPI() self._create_tasks() self.controller = glance.api.v2.tasks.TasksController(self.db, self.policy, self.notifier, self.store) self.gateway = glance.gateway.Gateway(self.db, self.store, self.notifier, self.policy) def _create_tasks(self): now = timeutils.utcnow() times = [now + datetime.timedelta(seconds=5 * i) for i in range(4)] self.tasks = [ _db_fixture(UUID1, owner=TENANT1, created_at=times[0], updated_at=times[0]), # FIXME(venkatesh): change the type to include clone and export # once they are included as a valid types under Task domain model. _db_fixture(UUID2, owner=TENANT2, type='import', created_at=times[1], updated_at=times[1]), _db_fixture(UUID3, owner=TENANT3, type='import', created_at=times[2], updated_at=times[2]), _db_fixture(UUID4, owner=TENANT4, type='import', created_at=times[3], updated_at=times[3])] [self.db.task_create(None, task) for task in self.tasks] def test_index(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_admin(self): request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request) self.assertEqual(4, len(output['tasks'])) def test_index_return_parameters(self): self.config(limit_param_default=1, api_limit_max=4) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID3, limit=1, sort_key='created_at', sort_dir='desc') self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task 
in output['tasks']]) expected = set([UUID2]) self.assertEqual(expected, actual) self.assertEqual(UUID2, output['next_marker']) def test_index_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID3, limit=2) self.assertEqual(2, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID2, UUID1]) self.assertEqual(expected, actual) self.assertEqual(UUID1, output['next_marker']) def test_index_no_next_marker(self): self.config(limit_param_default=1, api_limit_max=3) request = unit_test_utils.get_fake_request(is_admin=True) output = self.controller.index(request, marker=UUID1, limit=2) self.assertEqual(0, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([]) self.assertEqual(expected, actual) self.assertNotIn('next_marker', output) def test_index_with_id_filter(self): request = unit_test_utils.get_fake_request('/tasks?id=%s' % UUID1) output = self.controller.index(request, filters={'id': UUID1}) self.assertEqual(1, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_filters_return_many(self): path = '/tasks?status=pending' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, filters={'status': 'pending'}) self.assertEqual(4, len(output['tasks'])) actual = set([task.task_id for task in output['tasks']]) expected = set([UUID1, UUID2, UUID3, UUID4]) self.assertEqual(sorted(expected), sorted(actual)) def test_index_with_many_filters(self): url = '/tasks?status=pending&type=import' request = unit_test_utils.get_fake_request(url, is_admin=True) output = self.controller.index(request, filters={ 'status': 'pending', 'type': 'import', 'owner': TENANT1, }) self.assertEqual(1, len(output['tasks'])) actual = 
set([task.task_id for task in output['tasks']]) expected = set([UUID1]) self.assertEqual(expected, actual) def test_index_with_marker(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, marker=UUID3) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(1, len(actual)) self.assertIn(UUID2, actual) def test_index_with_limit(self): path = '/tasks' limit = 2 request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, limit=limit) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(limit, len(actual)) def test_index_greater_than_limit_max(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, limit=4) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(3, len(actual)) self.assertNotIn(output['next_marker'], output) def test_index_default_limit(self): self.config(limit_param_default=1, api_limit_max=3) path = '/tasks' request = unit_test_utils.get_fake_request(path) output = self.controller.index(request) actual = set([task.task_id for task in output['tasks']]) self.assertEqual(1, len(actual)) def test_index_with_sort_dir(self): path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, sort_dir='asc', limit=3) actual = [task.task_id for task in output['tasks']] self.assertEqual(3, len(actual)) self.assertEqual([UUID1, UUID2, UUID3], actual) def test_index_with_sort_key(self): path = '/tasks' request = unit_test_utils.get_fake_request(path, is_admin=True) output = self.controller.index(request, sort_key='created_at', limit=3) actual = [task.task_id for task in output['tasks']] self.assertEqual(3, len(actual)) self.assertEqual(UUID4, actual[0]) 
self.assertEqual(UUID3, actual[1]) self.assertEqual(UUID2, actual[2]) def test_index_with_marker_not_found(self): fake_uuid = str(uuid.uuid4()) path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=fake_uuid) def test_index_with_marker_is_not_like_uuid(self): marker = 'INVALID_UUID' path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, marker=marker) def test_index_invalid_sort_key(self): path = '/tasks' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, request, sort_key='foo') def test_index_zero_tasks(self): self.db.reset() request = unit_test_utils.get_fake_request() output = self.controller.index(request) self.assertEqual([], output['tasks']) def test_get(self): request = unit_test_utils.get_fake_request() task = self.controller.get(request, task_id=UUID1) self.assertEqual(UUID1, task.task_id) self.assertEqual('import', task.type) def test_get_non_existent(self): request = unit_test_utils.get_fake_request() task_id = str(uuid.uuid4()) self.assertRaises(webob.exc.HTTPNotFound, self.controller.get, request, task_id) def test_get_not_allowed(self): request = unit_test_utils.get_fake_request() self.assertEqual(TENANT1, request.context.tenant) self.assertRaises(webob.exc.HTTPNotFound, self.controller.get, request, UUID4) @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') @mock.patch.object(glance.gateway.Gateway, 'get_task_executor_factory') @mock.patch.object(glance.gateway.Gateway, 'get_task_repo') def test_create(self, mock_get_task_repo, mock_get_task_executor_factory, mock_get_task_factory): # setup request = unit_test_utils.get_fake_request() task = { "type": "import", "input": { "import_from": "swift://cloud.foo/myaccount/mycontainer/path", "import_from_format": "qcow2", "image_properties": {} } } 
get_task_factory = mock.Mock() mock_get_task_factory.return_value = get_task_factory new_task = mock.Mock() get_task_factory.new_task.return_value = new_task new_task.run.return_value = mock.ANY get_task_executor_factory = mock.Mock() mock_get_task_executor_factory.return_value = get_task_executor_factory get_task_executor_factory.new_task_executor.return_value = mock.Mock() get_task_repo = mock.Mock() mock_get_task_repo.return_value = get_task_repo get_task_repo.add.return_value = mock.Mock() # call self.controller.create(request, task=task) # assert self.assertEqual(1, get_task_factory.new_task.call_count) self.assertEqual(1, get_task_repo.add.call_count) self.assertEqual( 1, get_task_executor_factory.new_task_executor.call_count) @mock.patch('glance.common.scripts.utils.get_image_data_iter') @mock.patch('glance.common.scripts.utils.validate_location_uri') def test_create_with_live_time(self, mock_validate_location_uri, mock_get_image_data_iter): request = unit_test_utils.get_fake_request() task = { "type": "import", "input": { "import_from": "http://download.cirros-cloud.net/0.3.4/" "cirros-0.3.4-x86_64-disk.img", "import_from_format": "qcow2", "image_properties": { "disk_format": "qcow2", "container_format": "bare", "name": "test-task" } } } new_task = self.controller.create(request, task=task) executor_factory = self.gateway.get_task_executor_factory( request.context) task_executor = executor_factory.new_task_executor(request.context) task_executor.begin_processing(new_task.task_id) success_task = self.controller.get(request, new_task.task_id) # ignore second and microsecond to avoid flaky runs task_live_time = (success_task.expires_at.replace(second=0, microsecond=0) - success_task.updated_at.replace(second=0, microsecond=0)) task_live_time_hour = (task_live_time.days * 24 + task_live_time.seconds / 3600) self.assertEqual(CONF.task.task_time_to_live, task_live_time_hour) def test_create_with_wrong_import_form(self): request = 
unit_test_utils.get_fake_request() wrong_import_from = [ "swift://cloud.foo/myaccount/mycontainer/path", "file:///path", "s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id", "cinder://volume-id" ] executor_factory = self.gateway.get_task_executor_factory( request.context) task_repo = self.gateway.get_task_repo(request.context) for import_from in wrong_import_from: task = { "type": "import", "input": { "import_from": import_from, "import_from_format": "qcow2", "image_properties": { "disk_format": "qcow2", "container_format": "bare", "name": "test-task" } } } new_task = self.controller.create(request, task=task) task_executor = executor_factory.new_task_executor(request.context) task_executor.begin_processing(new_task.task_id) final_task = task_repo.get(new_task.task_id) self.assertEqual('failure', final_task.status) if import_from.startswith("file:///"): msg = ("File based imports are not allowed. Please use a " "non-local source of image data.") else: supported = ['http', ] msg = ("The given uri is not valid. 
Please specify a " "valid uri from the following list of supported uri " "%(supported)s") % {'supported': supported} self.assertEqual(msg, final_task.message) @mock.patch.object(glance.gateway.Gateway, 'get_task_factory') def test_notifications_on_create(self, mock_get_task_factory): request = unit_test_utils.get_fake_request() new_task = mock.MagicMock(type='import') mock_get_task_factory.new_task.return_value = new_task new_task.run.return_value = mock.ANY task = {"type": "import", "input": { "import_from": "http://cloud.foo/myaccount/mycontainer/path", "import_from_format": "qcow2", "image_properties": {} } } task = self.controller.create(request, task=task) output_logs = [nlog for nlog in self.notifier.get_logs() if nlog['event_type'] == 'task.create'] self.assertEqual(1, len(output_logs)) output_log = output_logs[0] self.assertEqual('INFO', output_log['notification_type']) self.assertEqual('task.create', output_log['event_type']) class TestTasksControllerPolicies(base.IsolatedUnitTest): def setUp(self): super(TestTasksControllerPolicies, self).setUp() self.db = unit_test_utils.FakeDB() self.policy = unit_test_utils.FakePolicyEnforcer() self.controller = glance.api.v2.tasks.TasksController(self.db, self.policy) def test_index_unauthorized(self): rules = {"get_tasks": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.index, request) def test_get_unauthorized(self): rules = {"get_task": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPForbidden, self.controller.get, request, task_id=UUID2) def test_create_task_unauthorized(self): rules = {"add_task": False} self.policy.set_rules(rules) request = unit_test_utils.get_fake_request() task = {'type': 'import', 'input': {"import_from": "fake"}} self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, request, task) def test_delete(self): request = 
unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPMethodNotAllowed, self.controller.delete, request, 'fake_id') class TestTasksDeserializer(test_utils.BaseTestCase): def setUp(self): super(TestTasksDeserializer, self).setUp() self.deserializer = glance.api.v2.tasks.RequestDeserializer() def test_create_no_body(self): request = unit_test_utils.get_fake_request() self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.create, request) def test_create(self): request = unit_test_utils.get_fake_request() request.body = jsonutils.dump_as_bytes({ 'type': 'import', 'input': {'import_from': 'swift://cloud.foo/myaccount/mycontainer/path', 'import_from_format': 'qcow2', 'image_properties': {'name': 'fake1'}}, }) output = self.deserializer.create(request) properties = { 'type': 'import', 'input': {'import_from': 'swift://cloud.foo/myaccount/mycontainer/path', 'import_from_format': 'qcow2', 'image_properties': {'name': 'fake1'}}, } self.maxDiff = None expected = {'task': properties} self.assertEqual(expected, output) def test_index(self): marker = str(uuid.uuid4()) path = '/tasks?limit=1&marker=%s' % marker request = unit_test_utils.get_fake_request(path) expected = {'limit': 1, 'marker': marker, 'sort_key': 'created_at', 'sort_dir': 'desc', 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_strip_params_from_filters(self): type = 'import' path = '/tasks?type=%s' % type request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(type, output['filters']['type']) def test_index_with_many_filter(self): status = 'success' type = 'import' path = '/tasks?status=%(status)s&type=%(type)s' % {'status': status, 'type': type} request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(status, output['filters']['status']) self.assertEqual(type, output['filters']['type']) def test_index_with_filter_and_limit(self): 
status = 'success' path = '/tasks?status=%s&limit=1' % status request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(status, output['filters']['status']) self.assertEqual(1, output['limit']) def test_index_non_integer_limit(self): request = unit_test_utils.get_fake_request('/tasks?limit=blah') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_zero_limit(self): request = unit_test_utils.get_fake_request('/tasks?limit=0') expected = {'limit': 0, 'sort_key': 'created_at', 'sort_dir': 'desc', 'filters': {}} output = self.deserializer.index(request) self.assertEqual(expected, output) def test_index_negative_limit(self): path = '/tasks?limit=-1' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_fraction(self): request = unit_test_utils.get_fake_request('/tasks?limit=1.1') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_invalid_status(self): path = '/tasks?status=blah' request = unit_test_utils.get_fake_request(path) self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) def test_index_marker(self): marker = str(uuid.uuid4()) path = '/tasks?marker=%s' % marker request = unit_test_utils.get_fake_request(path) output = self.deserializer.index(request) self.assertEqual(marker, output.get('marker')) def test_index_marker_not_specified(self): request = unit_test_utils.get_fake_request('/tasks') output = self.deserializer.index(request) self.assertNotIn('marker', output) def test_index_limit_not_specified(self): request = unit_test_utils.get_fake_request('/tasks') output = self.deserializer.index(request) self.assertNotIn('limit', output) def test_index_sort_key_id(self): request = unit_test_utils.get_fake_request('/tasks?sort_key=id') output = self.deserializer.index(request) expected = { 'sort_key': 'id', 'sort_dir': 'desc', 
'filters': {} } self.assertEqual(expected, output) def test_index_sort_dir_asc(self): request = unit_test_utils.get_fake_request('/tasks?sort_dir=asc') output = self.deserializer.index(request) expected = { 'sort_key': 'created_at', 'sort_dir': 'asc', 'filters': {}} self.assertEqual(expected, output) def test_index_sort_dir_bad_value(self): request = unit_test_utils.get_fake_request('/tasks?sort_dir=invalid') self.assertRaises(webob.exc.HTTPBadRequest, self.deserializer.index, request) class TestTasksSerializer(test_utils.BaseTestCase): def setUp(self): super(TestTasksSerializer, self).setUp() self.serializer = glance.api.v2.tasks.ResponseSerializer() self.fixtures = [ _domain_fixture(UUID1, type='import', status='pending', task_input={'loc': 'fake'}, result={}, owner=TENANT1, message='', created_at=DATETIME, updated_at=DATETIME), _domain_fixture(UUID2, type='import', status='processing', task_input={'loc': 'bake'}, owner=TENANT2, message='', created_at=DATETIME, updated_at=DATETIME, result={}), _domain_fixture(UUID3, type='import', status='success', task_input={'loc': 'foo'}, owner=TENANT3, message='', created_at=DATETIME, updated_at=DATETIME, result={}, expires_at=DATETIME), _domain_fixture(UUID4, type='import', status='failure', task_input={'loc': 'boo'}, owner=TENANT4, message='', created_at=DATETIME, updated_at=DATETIME, result={}, expires_at=DATETIME), ] def test_index(self): expected = { 'tasks': [ { 'id': UUID1, 'type': 'import', 'status': 'pending', 'owner': TENANT1, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID1, 'schema': '/v2/schemas/task', }, { 'id': UUID2, 'type': 'import', 'status': 'processing', 'owner': TENANT2, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID2, 'schema': '/v2/schemas/task', }, { 'id': UUID3, 'type': 'import', 'status': 'success', 'owner': TENANT3, 'expires_at': ISOTIME, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID3, 'schema': 
'/v2/schemas/task', }, { 'id': UUID4, 'type': 'import', 'status': 'failure', 'owner': TENANT4, 'expires_at': ISOTIME, 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID4, 'schema': '/v2/schemas/task', }, ], 'first': '/v2/tasks', 'schema': '/v2/schemas/tasks', } request = webob.Request.blank('/v2/tasks') response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures} self.serializer.index(response, result) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_index_next_marker(self): request = webob.Request.blank('/v2/tasks') response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) self.assertEqual('/v2/tasks?marker=%s' % UUID2, output['next']) def test_index_carries_query_parameters(self): url = '/v2/tasks?limit=10&sort_key=id&sort_dir=asc' request = webob.Request.blank(url) response = webob.Response(request=request) task_fixtures = [f for f in self.fixtures] result = {'tasks': task_fixtures, 'next_marker': UUID2} self.serializer.index(response, result) output = jsonutils.loads(response.body) expected_url = '/v2/tasks?limit=10&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys(expected_url), unit_test_utils.sort_url_by_qs_keys(output['first'])) expect_next = '/v2/tasks?limit=10&marker=%s&sort_dir=asc&sort_key=id' self.assertEqual(unit_test_utils.sort_url_by_qs_keys( expect_next % UUID2), unit_test_utils.sort_url_by_qs_keys(output['next'])) def test_get(self): expected = { 'id': UUID4, 'type': 'import', 'status': 'failure', 'input': {'loc': 'boo'}, 'result': {}, 'owner': TENANT4, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'expires_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID4, 'schema': 
'/v2/schemas/task', } response = webob.Response() self.serializer.get(response, self.fixtures[3]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_get_ensure_expires_at_not_returned(self): expected = { 'id': UUID1, 'type': 'import', 'status': 'pending', 'input': {'loc': 'fake'}, 'result': {}, 'owner': TENANT1, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID1, 'schema': '/v2/schemas/task', } response = webob.Response() self.serializer.get(response, self.fixtures[0]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) expected = { 'id': UUID2, 'type': 'import', 'status': 'processing', 'input': {'loc': 'bake'}, 'result': {}, 'owner': TENANT2, 'message': '', 'created_at': ISOTIME, 'updated_at': ISOTIME, 'self': '/v2/tasks/%s' % UUID2, 'schema': '/v2/schemas/task', } response = webob.Response() self.serializer.get(response, self.fixtures[1]) actual = jsonutils.loads(response.body) self.assertEqual(expected, actual) self.assertEqual('application/json', response.content_type) def test_create(self): response = webob.Response() self.serializer.create(response, self.fixtures[3]) serialized_task = jsonutils.loads(response.body) self.assertEqual(201, response.status_int) self.assertEqual(self.fixtures[3].task_id, serialized_task['id']) self.assertEqual(self.fixtures[3].task_input, serialized_task['input']) self.assertIn('expires_at', serialized_task) self.assertEqual('application/json', response.content_type) def test_create_ensure_expires_at_is_not_returned(self): response = webob.Response() self.serializer.create(response, self.fixtures[0]) serialized_task = jsonutils.loads(response.body) self.assertEqual(201, response.status_int) self.assertEqual(self.fixtures[0].task_id, serialized_task['id']) self.assertEqual(self.fixtures[0].task_input, 
serialized_task['input']) self.assertNotIn('expires_at', serialized_task) self.assertEqual('application/json', response.content_type) response = webob.Response() self.serializer.create(response, self.fixtures[1]) serialized_task = jsonutils.loads(response.body) self.assertEqual(201, response.status_int) self.assertEqual(self.fixtures[1].task_id, serialized_task['id']) self.assertEqual(self.fixtures[1].task_input, serialized_task['input']) self.assertNotIn('expires_at', serialized_task) self.assertEqual('application/json', response.content_type) glance-12.0.0/glance/tests/unit/test_domain.py0000664000567000056710000005511212701407047022424 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(review): this tar member (glance/tests/unit/test_domain.py) was
# flattened onto single mega-lines in the archive dump; it is restored to
# conventional formatting below with comments added.  All code tokens are
# unchanged from the original.
import datetime
import uuid

import mock
from oslo_config import cfg
import oslo_utils.importutils

import glance.async
from glance.async import taskflow_executor
from glance.common import exception
from glance.common.glare import definitions
from glance.common import timeutils
from glance import domain
from glance.glare import domain as artifacts_domain
import glance.tests.utils as test_utils

CONF = cfg.CONF

# Fixed identifiers shared by the test cases below.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'


class TestImageFactory(test_utils.BaseTestCase):
    """Exercise domain.ImageFactory: defaults, overrides and input checks."""

    def setUp(self):
        super(TestImageFactory, self).setUp()
        self.image_factory = domain.ImageFactory()

    def test_minimal_new_image(self):
        # A bare new_image() gets generated id/timestamps and safe defaults.
        image = self.image_factory.new_image()
        self.assertIsNotNone(image.image_id)
        self.assertIsNotNone(image.created_at)
        self.assertEqual(image.created_at, image.updated_at)
        self.assertEqual('queued', image.status)
        self.assertEqual('private', image.visibility)
        self.assertIsNone(image.owner)
        self.assertIsNone(image.name)
        self.assertIsNone(image.size)
        self.assertEqual(0, image.min_disk)
        self.assertEqual(0, image.min_ram)
        self.assertFalse(image.protected)
        self.assertIsNone(image.disk_format)
        self.assertIsNone(image.container_format)
        self.assertEqual({}, image.extra_properties)
        self.assertEqual(set([]), image.tags)

    def test_new_image(self):
        # Explicitly supplied attributes are honored; the rest keep defaults.
        image = self.image_factory.new_image(
            image_id=UUID1, name='image-1',
            min_disk=256, owner=TENANT1)
        self.assertEqual(UUID1, image.image_id)
        self.assertIsNotNone(image.created_at)
        self.assertEqual(image.created_at, image.updated_at)
        self.assertEqual('queued', image.status)
        self.assertEqual('private', image.visibility)
        self.assertEqual(TENANT1, image.owner)
        self.assertEqual('image-1', image.name)
        self.assertIsNone(image.size)
        self.assertEqual(256, image.min_disk)
        self.assertEqual(0, image.min_ram)
        self.assertFalse(image.protected)
        self.assertIsNone(image.disk_format)
        self.assertIsNone(image.container_format)
        self.assertEqual({}, image.extra_properties)
        self.assertEqual(set([]), image.tags)

    def test_new_image_with_extra_properties_and_tags(self):
        extra_properties = {'foo': 'bar'}
        tags = ['one', 'two']
        image = self.image_factory.new_image(
            image_id=UUID1, name='image-1',
            extra_properties=extra_properties, tags=tags)
        # NOTE(review): the third positional argument here is used as the
        # assertEqual failure message; passing UUID1 looks accidental.
        self.assertEqual(UUID1, image.image_id, UUID1)
        self.assertIsNotNone(image.created_at)
        self.assertEqual(image.created_at, image.updated_at)
        self.assertEqual('queued', image.status)
        self.assertEqual('private', image.visibility)
        self.assertIsNone(image.owner)
        self.assertEqual('image-1', image.name)
        self.assertIsNone(image.size)
        self.assertEqual(0, image.min_disk)
        self.assertEqual(0, image.min_ram)
        self.assertFalse(image.protected)
        self.assertIsNone(image.disk_format)
        self.assertIsNone(image.container_format)
        self.assertEqual({'foo': 'bar'}, image.extra_properties)
        # Tags are normalized to a set regardless of the input sequence type.
        self.assertEqual(set(['one', 'two']), image.tags)

    def test_new_image_read_only_property(self):
        # 'size' is server-populated; callers may not set it at creation.
        self.assertRaises(exception.ReadonlyProperty,
                          self.image_factory.new_image, image_id=UUID1,
                          name='image-1', size=256)

    def test_new_image_unexpected_property(self):
        # Unknown keyword arguments surface as TypeError.
        self.assertRaises(TypeError,
                          self.image_factory.new_image, image_id=UUID1,
                          image_name='name-1')

    def test_new_image_reserved_property(self):
        # 'deleted' is a reserved attribute name and may not be injected
        # through extra_properties.
        extra_properties = {'deleted': True}
        self.assertRaises(exception.ReservedProperty,
                          self.image_factory.new_image, image_id=UUID1,
                          extra_properties=extra_properties)

    def test_new_image_for_is_public(self):
        # 'is_public' is accepted as a plain extra property here.
        extra_prop = {'is_public': True}
        new_image = self.image_factory.new_image(image_id=UUID1,
                                                 extra_properties=extra_prop)
        self.assertEqual(True, new_image.extra_properties['is_public'])


class TestImage(test_utils.BaseTestCase):
    """Exercise mutation rules and status transitions on a domain Image."""

    def setUp(self):
        super(TestImage, self).setUp()
        self.image_factory = domain.ImageFactory()
        self.image = self.image_factory.new_image(
            container_format='bear', disk_format='rawr')

    def test_extra_properties(self):
        self.image.extra_properties = {'foo': 'bar'}
        self.assertEqual({'foo': 'bar'}, self.image.extra_properties)

    def test_extra_properties_assign(self):
        self.image.extra_properties['foo'] = 'bar'
        self.assertEqual({'foo': 'bar'}, self.image.extra_properties)

    def test_delete_extra_properties(self):
        self.image.extra_properties = {'foo': 'bar'}
        self.assertEqual({'foo': 'bar'}, self.image.extra_properties)
        del self.image.extra_properties['foo']
        self.assertEqual({}, self.image.extra_properties)

    def test_visibility_enumerated(self):
        # Only the known visibility values are accepted.
        self.image.visibility = 'public'
        self.image.visibility = 'private'
        self.assertRaises(ValueError, setattr,
                          self.image, 'visibility', 'ellison')

    def test_tags_always_a_set(self):
        self.image.tags = ['a', 'b', 'c']
        self.assertEqual(set(['a', 'b', 'c']), self.image.tags)

    def test_delete_protected_image(self):
        self.image.protected = True
        self.assertRaises(exception.ProtectedImageDelete, self.image.delete)

    def test_status_saving(self):
        self.image.status = 'saving'
        self.assertEqual('saving', self.image.status)

    def test_set_incorrect_status(self):
        # 'saving' -> 'killed' is allowed; 'killed' -> 'delet' is not.
        self.image.status = 'saving'
        self.image.status = 'killed'
        self.assertRaises(
            exception.InvalidImageStatusTransition,
            setattr, self.image, 'status', 'delet')

    def test_status_saving_without_disk_format(self):
        self.image.disk_format = None
        self.assertRaises(ValueError, setattr,
                          self.image, 'status', 'saving')

    def test_status_saving_without_container_format(self):
        self.image.container_format = None
        self.assertRaises(ValueError, setattr,
                          self.image, 'status', 'saving')

    def test_status_active_without_disk_format(self):
        self.image.disk_format = None
        self.assertRaises(ValueError, setattr,
                          self.image, 'status', 'active')

    def test_status_active_without_container_format(self):
        self.image.container_format = None
        self.assertRaises(ValueError, setattr,
                          self.image, 'status', 'active')

    def test_delayed_delete(self):
        # With delayed_delete enabled, delete() parks the image in
        # 'pending_delete' instead of removing it outright.
        self.config(delayed_delete=True)
        self.image.status = 'active'
        self.image.locations = [{'url': 'http://foo.bar/not.exists',
                                 'metadata': {}}]
        self.assertEqual('active', self.image.status)
        self.image.delete()
        self.assertEqual('pending_delete', self.image.status)


class TestImageMember(test_utils.BaseTestCase):
    """Exercise status handling on a domain ImageMember."""

    def setUp(self):
        super(TestImageMember, self).setUp()
        self.image_member_factory = domain.ImageMemberFactory()
        self.image_factory = domain.ImageFactory()
        self.image = self.image_factory.new_image()
        self.image_member = self.image_member_factory.new_image_member(
            image=self.image,
            member_id=TENANT1)

    def test_status_enumerated(self):
        # Only pending/accepted/rejected are valid member statuses.
        self.image_member.status = 'pending'
        self.image_member.status = 'accepted'
        self.image_member.status = 'rejected'
        self.assertRaises(ValueError, setattr,
                          self.image_member, 'status', 'ellison')


class TestImageMemberFactory(test_utils.BaseTestCase):
    """Exercise ImageMemberFactory.new_image_member defaults."""

    def setUp(self):
        super(TestImageMemberFactory, self).setUp()
        self.image_member_factory = domain.ImageMemberFactory()
        self.image_factory = domain.ImageFactory()

    def test_minimal_new_image_member(self):
        member_id = 'fake-member-id'
        image = self.image_factory.new_image(
            image_id=UUID1, name='image-1',
            min_disk=256, owner=TENANT1)
        image_member = self.image_member_factory.new_image_member(image,
                                                                 member_id)
        self.assertEqual(image_member.image_id, image.image_id)
        self.assertIsNotNone(image_member.created_at)
        self.assertEqual(image_member.created_at, image_member.updated_at)
        self.assertEqual('pending', image_member.status)
        self.assertIsNotNone(image_member.member_id)


class TestExtraProperties(test_utils.BaseTestCase):
    """Exercise the dict-like contract of domain.ExtraProperties."""

    def test_getitem(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        self.assertEqual('bar', extra_properties['foo'])
        self.assertEqual('golden', extra_properties['snitch'])

    def test_getitem_with_no_items(self):
        extra_properties = domain.ExtraProperties()
        self.assertRaises(KeyError, extra_properties.__getitem__, 'foo')

    def test_setitem(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        extra_properties['foo'] = 'baz'
        self.assertEqual('baz', extra_properties['foo'])

    def test_delitem(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        del extra_properties['foo']
        self.assertRaises(KeyError, extra_properties.__getitem__, 'foo')
        self.assertEqual('golden', extra_properties['snitch'])

    def test_len_with_zero_items(self):
        extra_properties = domain.ExtraProperties()
        self.assertEqual(0, len(extra_properties))

    def test_len_with_non_zero_items(self):
        extra_properties = domain.ExtraProperties()
        extra_properties['foo'] = 'bar'
        extra_properties['snitch'] = 'golden'
        self.assertEqual(2, len(extra_properties))

    def test_eq_with_a_dict(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        ref_extra_properties = {'foo': 'bar', 'snitch': 'golden'}
        self.assertEqual(ref_extra_properties, extra_properties)

    def test_eq_with_an_object_of_ExtraProperties(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        ref_extra_properties = domain.ExtraProperties()
        ref_extra_properties['snitch'] = 'golden'
        ref_extra_properties['foo'] = 'bar'
        self.assertEqual(ref_extra_properties, extra_properties)

    def test_eq_with_uneqal_dict(self):
        # NOTE(review): 'uneqal' is a typo in the test name; left as-is to
        # avoid churning test identifiers.
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        ref_extra_properties = {'boo': 'far', 'gnitch': 'solden'}
        self.assertFalse(extra_properties.__eq__(ref_extra_properties))

    def test_eq_with_unequal_ExtraProperties_object(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        ref_extra_properties = domain.ExtraProperties()
        ref_extra_properties['gnitch'] = 'solden'
        ref_extra_properties['boo'] = 'far'
        self.assertFalse(extra_properties.__eq__(ref_extra_properties))

    def test_eq_with_incompatible_object(self):
        a_dict = {'foo': 'bar', 'snitch': 'golden'}
        extra_properties = domain.ExtraProperties(a_dict)
        random_list = ['foo', 'bar']
        self.assertFalse(extra_properties.__eq__(random_list))


class TestTaskFactory(test_utils.BaseTestCase):
    """Exercise domain.TaskFactory.new_task validation and defaults."""

    def setUp(self):
        super(TestTaskFactory, self).setUp()
        self.task_factory = domain.TaskFactory()

    def test_new_task(self):
        task_type = 'import'
        owner = TENANT1
        task_input = 'input'
        task = self.task_factory.new_task(task_type, owner,
                                          task_input=task_input,
                                          result='test_result',
                                          message='test_message')
        self.assertIsNotNone(task.task_id)
        self.assertIsNotNone(task.created_at)
        self.assertEqual(task_type, task.type)
        self.assertEqual(task.created_at, task.updated_at)
        self.assertEqual('pending', task.status)
        # expires_at is only set once the task reaches a terminal state.
        self.assertIsNone(task.expires_at)
        self.assertEqual(owner, task.owner)
        self.assertEqual(task_input, task.task_input)
        self.assertEqual('test_message', task.message)
        self.assertEqual('test_result', task.result)

    def test_new_task_invalid_type(self):
        task_type = 'blah'
        owner = TENANT1
        self.assertRaises(
            exception.InvalidTaskType,
            self.task_factory.new_task,
            task_type,
            owner,
        )


class TestTask(test_utils.BaseTestCase):
    """Exercise the task state machine: pending -> processing -> done."""

    def setUp(self):
        super(TestTask, self).setUp()
        self.task_factory = domain.TaskFactory()
        task_type = 'import'
        owner = TENANT1
        task_ttl = CONF.task.task_time_to_live
        self.task = self.task_factory.new_task(task_type,
                                               owner,
                                               task_time_to_live=task_ttl)

    def test_task_invalid_status(self):
        task_id = str(uuid.uuid4())
        status = 'blah'
        self.assertRaises(
            exception.InvalidTaskStatus,
            domain.Task,
            task_id,
            task_type='import',
            status=status,
            owner=None,
            expires_at=None,
            created_at=timeutils.utcnow(),
            updated_at=timeutils.utcnow(),
            task_input=None,
            message=None,
            result=None
        )

    def test_validate_status_transition_from_pending(self):
        self.task.begin_processing()
        self.assertEqual('processing', self.task.status)

    def test_validate_status_transition_from_processing_to_success(self):
        self.task.begin_processing()
        self.task.succeed('')
        self.assertEqual('success', self.task.status)

    def test_validate_status_transition_from_processing_to_failure(self):
        self.task.begin_processing()
        self.task.fail('')
        self.assertEqual('failure', self.task.status)

    def test_invalid_status_transitions_from_pending(self):
        # test do not allow transition from pending to success
        self.assertRaises(
            exception.InvalidTaskStatusTransition,
            self.task.succeed,
            ''
        )

    def test_invalid_status_transitions_from_success(self):
        # test do not allow transition from success to processing
        self.task.begin_processing()
        self.task.succeed('')
        self.assertRaises(
            exception.InvalidTaskStatusTransition,
            self.task.begin_processing
        )
        # test do not allow transition from success to failure
        self.assertRaises(
            exception.InvalidTaskStatusTransition,
            self.task.fail,
            ''
        )

    def test_invalid_status_transitions_from_failure(self):
        # test do not allow transition from failure to processing
        self.task.begin_processing()
        self.task.fail('')
        self.assertRaises(
            exception.InvalidTaskStatusTransition,
            self.task.begin_processing
        )
        # test do not allow transition from failure to success
        self.assertRaises(
            exception.InvalidTaskStatusTransition,
            self.task.succeed,
            ''
        )

    def test_begin_processing(self):
        self.task.begin_processing()
        self.assertEqual('processing', self.task.status)

    @mock.patch.object(timeutils, 'utcnow')
    def test_succeed(self, mock_utcnow):
        # Freeze utcnow so expires_at can be compared exactly.
        mock_utcnow.return_value = datetime.datetime.utcnow()
        self.task.begin_processing()
        self.task.succeed('{"location": "file://home"}')
        self.assertEqual('success', self.task.status)
        self.assertEqual('{"location": "file://home"}', self.task.result)
        self.assertEqual(u'', self.task.message)
        expected = (timeutils.utcnow() +
                    datetime.timedelta(hours=CONF.task.task_time_to_live))
        self.assertEqual(
            expected,
            self.task.expires_at
        )

    @mock.patch.object(timeutils, 'utcnow')
    def test_fail(self, mock_utcnow):
        # Freeze utcnow so expires_at can be compared exactly.
        mock_utcnow.return_value = datetime.datetime.utcnow()
        self.task.begin_processing()
        self.task.fail('{"message": "connection failed"}')
        self.assertEqual('failure', self.task.status)
        self.assertEqual('{"message": "connection failed"}',
                         self.task.message)
        self.assertIsNone(self.task.result)
        expected = (timeutils.utcnow() +
                    datetime.timedelta(hours=CONF.task.task_time_to_live))
        self.assertEqual(
            expected,
            self.task.expires_at
        )

    @mock.patch.object(glance.async.TaskExecutor, 'begin_processing')
    def test_run(self, mock_begin_processing):
        executor = glance.async.TaskExecutor(context=mock.ANY,
                                             task_repo=mock.ANY,
                                             image_repo=mock.ANY,
                                             image_factory=mock.ANY)
        self.task.run(executor)
        mock_begin_processing.assert_called_once_with(self.task.task_id)


class TestTaskStub(test_utils.BaseTestCase):
    """Exercise the lightweight TaskStub read-only view."""

    def setUp(self):
        super(TestTaskStub, self).setUp()
        self.task_id = str(uuid.uuid4())
        self.task_type = 'import'
        self.owner = TENANT1
        self.task_ttl = CONF.task.task_time_to_live

    def test_task_stub_init(self):
        self.task_factory = domain.TaskFactory()
        task = domain.TaskStub(
            self.task_id,
            self.task_type,
            'status',
            self.owner,
            'expires_at',
            'created_at',
            'updated_at'
        )
        self.assertEqual(self.task_id, task.task_id)
        self.assertEqual(self.task_type, task.type)
        self.assertEqual(self.owner, task.owner)
        self.assertEqual('status', task.status)
        self.assertEqual('expires_at', task.expires_at)
        self.assertEqual('created_at', task.created_at)
        self.assertEqual('updated_at', task.updated_at)

    def test_task_stub_get_status(self):
        status = 'pending'
        task = domain.TaskStub(
            self.task_id,
            self.task_type,
            status,
            self.owner,
            'expires_at',
            'created_at',
            'updated_at'
        )
        self.assertEqual(status, task.status)


class TestTaskExecutorFactory(test_utils.BaseTestCase):
    """Exercise TaskExecutorFactory executor loading and fallbacks."""

    def setUp(self):
        super(TestTaskExecutorFactory, self).setUp()
        self.task_repo = mock.Mock()
        self.image_repo = mock.Mock()
        self.image_factory = mock.Mock()

    def test_init(self):
        task_executor_factory = domain.TaskExecutorFactory(self.task_repo,
                                                           self.image_repo,
                                                           self.image_factory)
        self.assertEqual(self.task_repo, task_executor_factory.task_repo)

    def test_new_task_executor(self):
        task_executor_factory = domain.TaskExecutorFactory(self.task_repo,
                                                           self.image_repo,
                                                           self.image_factory)
        context = mock.Mock()
        with mock.patch.object(oslo_utils.importutils,
                               'import_class') as mock_import_class:
            mock_executor = mock.Mock()
            mock_import_class.return_value = mock_executor
            task_executor_factory.new_task_executor(context)
        # The dynamically imported executor class is instantiated with the
        # context and the repositories/factory held by the factory.
        mock_executor.assert_called_once_with(context,
                                              self.task_repo,
                                              self.image_repo,
                                              self.image_factory)

    def test_new_task_executor_error(self):
        task_executor_factory = domain.TaskExecutorFactory(self.task_repo,
                                                           self.image_repo,
                                                           self.image_factory)
        context = mock.Mock()
        with mock.patch.object(oslo_utils.importutils,
                               'import_class') as mock_import_class:
            mock_import_class.side_effect = ImportError
            self.assertRaises(ImportError,
                              task_executor_factory.new_task_executor,
                              context)

    def test_new_task_eventlet_backwards_compatibility(self):
        context = mock.MagicMock()
        self.config(task_executor='eventlet', group='task')
        task_executor_factory = domain.TaskExecutorFactory(self.task_repo,
                                                           self.image_repo,
                                                           self.image_factory)
        # NOTE(flaper87): "eventlet" executor. short name to avoid > 79.
        te_evnt = task_executor_factory.new_task_executor(context)
        self.assertIsInstance(te_evnt, taskflow_executor.TaskExecutor)


class TestArtifact(definitions.ArtifactType):
    # Minimal artifact type used as a fixture by TestArtifactTypeFactory.
    prop1 = definitions.Dict()
    prop2 = definitions.Integer(min_value=10)


class TestArtifactTypeFactory(test_utils.BaseTestCase):
    """Exercise ArtifactFactory.new_artifact initial state."""

    def setUp(self):
        super(TestArtifactTypeFactory, self).setUp()
        context = mock.Mock(owner='me')
        self.factory = artifacts_domain.ArtifactFactory(context, TestArtifact)

    def test_new_artifact_min_params(self):
        artifact = self.factory.new_artifact("foo", "1.0.0-alpha")
        self.assertEqual('creating', artifact.state)
        self.assertEqual('me', artifact.owner)
        self.assertTrue(artifact.id is not None)
# NOTE(review): the line below is tar-archive metadata (next member header
# for test_auth.py plus the start of its license text), preserved verbatim.
glance-12.0.0/glance/tests/unit/test_auth.py0000664000567000056710000011406112701407047022115 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslotest import moxstubout import webob from glance.api import authorization from glance.common import auth from glance.common import exception from glance.common import timeutils import glance.domain from glance.tests.unit import utils as unittest_utils from glance.tests import utils TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' UUID2 = 'a85abd86-55b3-4d5b-b0b4-5d0a6e6042fc' class FakeResponse(object): """ Simple class that masks the inconsistency between webob.Response.status_int and httplib.Response.status """ def __init__(self, resp): self.resp = resp def __getitem__(self, key): return self.resp.headers.get(key) @property def status(self): return self.resp.status_int class V2Token(object): def __init__(self): self.tok = self.base_token def add_service_no_type(self): catalog = self.tok['access']['serviceCatalog'] service_type = {"name": "glance_no_type"} catalog.append(service_type) service = catalog[-1] service['endpoints'] = [self.base_endpoint] def add_service(self, s_type, region_list=None): if region_list is None: region_list = [] catalog = self.tok['access']['serviceCatalog'] service_type = {"type": s_type, "name": "glance"} catalog.append(service_type) service = catalog[-1] endpoint_list = [] if region_list == []: endpoint_list.append(self.base_endpoint) else: for region in region_list: endpoint = self.base_endpoint endpoint['region'] = region endpoint_list.append(endpoint) service['endpoints'] = endpoint_list 
@property def token(self): return self.tok @property def base_endpoint(self): return { "adminURL": "http://localhost:9292", "internalURL": "http://localhost:9292", "publicURL": "http://localhost:9292" } @property def base_token(self): return { "access": { "token": { "expires": "2010-11-23T16:40:53.321584", "id": "5c7f8799-2e54-43e4-851b-31f81871b6c", "tenant": {"id": "1", "name": "tenant-ok"} }, "serviceCatalog": [ ], "user": { "id": "2", "roles": [{ "tenantId": "1", "id": "1", "name": "Admin" }], "name": "joeadmin" } } } class TestKeystoneAuthPlugin(utils.BaseTestCase): """Test that the Keystone auth plugin works properly""" def setUp(self): super(TestKeystoneAuthPlugin, self).setUp() mox_fixture = self.useFixture(moxstubout.MoxStubout()) self.stubs = mox_fixture.stubs def test_get_plugin_from_strategy_keystone(self): strategy = auth.get_plugin_from_strategy('keystone') self.assertIsInstance(strategy, auth.KeystoneStrategy) self.assertTrue(strategy.configure_via_auth) def test_get_plugin_from_strategy_keystone_configure_via_auth_false(self): strategy = auth.get_plugin_from_strategy('keystone', configure_via_auth=False) self.assertIsInstance(strategy, auth.KeystoneStrategy) self.assertFalse(strategy.configure_via_auth) def test_required_creds(self): """ Test that plugin created without required credential pieces raises an exception """ bad_creds = [ {}, # missing everything { 'username': 'user1', 'strategy': 'keystone', 'password': 'pass' }, # missing auth_url { 'password': 'pass', 'strategy': 'keystone', 'auth_url': 'http://localhost/v1' }, # missing username { 'username': 'user1', 'strategy': 'keystone', 'auth_url': 'http://localhost/v1' }, # missing password { 'username': 'user1', 'password': 'pass', 'auth_url': 'http://localhost/v1' }, # missing strategy { 'username': 'user1', 'password': 'pass', 'strategy': 'keystone', 'auth_url': 'http://localhost/v2.0/' }, # v2.0: missing tenant { 'username': None, 'password': 'pass', 'auth_url': 'http://localhost/v2.0/' }, 
# None parameter { 'username': 'user1', 'password': 'pass', 'auth_url': 'http://localhost/v2.0/', 'tenant': None } # None tenant ] for creds in bad_creds: try: plugin = auth.KeystoneStrategy(creds) plugin.authenticate() self.fail("Failed to raise correct exception when supplying " "bad credentials: %r" % creds) except exception.MissingCredentialError: continue # Expected def test_invalid_auth_url_v1(self): """ Test that a 400 during authenticate raises exception.AuthBadRequest """ def fake_do_request(*args, **kwargs): resp = webob.Response() resp.status = 400 return FakeResponse(resp), "" self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) bad_creds = { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/', 'password': 'pass', 'strategy': 'keystone', 'region': 'RegionOne' } plugin = auth.KeystoneStrategy(bad_creds) self.assertRaises(exception.AuthBadRequest, plugin.authenticate) def test_invalid_auth_url_v2(self): """ Test that a 400 during authenticate raises exception.AuthBadRequest """ def fake_do_request(*args, **kwargs): resp = webob.Response() resp.status = 400 return FakeResponse(resp), "" self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) bad_creds = { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/v2.0/', 'password': 'pass', 'tenant': 'tenant1', 'strategy': 'keystone', 'region': 'RegionOne' } plugin = auth.KeystoneStrategy(bad_creds) self.assertRaises(exception.AuthBadRequest, plugin.authenticate) def test_v1_auth(self): """Test v1 auth code paths""" def fake_do_request(cls, url, method, headers=None, body=None): if url.find("2.0") != -1: self.fail("Invalid v1.0 token path (%s)" % url) headers = headers or {} resp = webob.Response() if (headers.get('X-Auth-User') != 'user1' or headers.get('X-Auth-Key') != 'pass'): resp.status = 401 else: resp.status = 200 resp.headers.update({"x-image-management-url": "example.com"}) return FakeResponse(resp), "" self.stubs.Set(auth.KeystoneStrategy, 
'_do_request', fake_do_request) unauthorized_creds = [ { 'username': 'wronguser', 'auth_url': 'http://localhost/badauthurl/', 'strategy': 'keystone', 'region': 'RegionOne', 'password': 'pass' }, # wrong username { 'username': 'user1', 'auth_url': 'http://localhost/badauthurl/', 'strategy': 'keystone', 'region': 'RegionOne', 'password': 'badpass' }, # bad password... ] for creds in unauthorized_creds: try: plugin = auth.KeystoneStrategy(creds) plugin.authenticate() self.fail("Failed to raise NotAuthenticated when supplying " "bad credentials: %r" % creds) except exception.NotAuthenticated: continue # Expected no_strategy_creds = { 'username': 'user1', 'auth_url': 'http://localhost/redirect/', 'password': 'pass', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(no_strategy_creds) plugin.authenticate() self.fail("Failed to raise MissingCredentialError when " "supplying no strategy: %r" % no_strategy_creds) except exception.MissingCredentialError: pass # Expected good_creds = [ { 'username': 'user1', 'auth_url': 'http://localhost/redirect/', 'password': 'pass', 'strategy': 'keystone', 'region': 'RegionOne' } ] for creds in good_creds: plugin = auth.KeystoneStrategy(creds) self.assertIsNone(plugin.authenticate()) self.assertEqual("example.com", plugin.management_url) # Assert it does not update management_url via auth response for creds in good_creds: plugin = auth.KeystoneStrategy(creds, configure_via_auth=False) self.assertIsNone(plugin.authenticate()) self.assertIsNone(plugin.management_url) def test_v2_auth(self): """Test v2 auth code paths""" mock_token = None def fake_do_request(cls, url, method, headers=None, body=None): if (not url.rstrip('/').endswith('v2.0/tokens') or url.count("2.0") != 1): self.fail("Invalid v2.0 token path (%s)" % url) creds = jsonutils.loads(body)['auth'] username = creds['passwordCredentials']['username'] password = creds['passwordCredentials']['password'] tenant = creds['tenantName'] resp = webob.Response() if (username != 
'user1' or password != 'pass' or tenant != 'tenant-ok'): resp.status = 401 else: resp.status = 200 body = mock_token.token return FakeResponse(resp), jsonutils.dumps(body) mock_token = V2Token() mock_token.add_service('image', ['RegionOne']) self.stubs.Set(auth.KeystoneStrategy, '_do_request', fake_do_request) unauthorized_creds = [ { 'username': 'wronguser', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # wrong username { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'badpass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # bad password... { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'carterhayes', 'strategy': 'keystone', 'region': 'RegionOne' }, # bad tenant... ] for creds in unauthorized_creds: try: plugin = auth.KeystoneStrategy(creds) plugin.authenticate() self.fail("Failed to raise NotAuthenticated when supplying " "bad credentials: %r" % creds) except exception.NotAuthenticated: continue # Expected no_region_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'strategy': 'keystone' } plugin = auth.KeystoneStrategy(no_region_creds) self.assertIsNone(plugin.authenticate()) self.assertEqual('http://localhost:9292', plugin.management_url) # Add another image service, with a different region mock_token.add_service('image', ['RegionTwo']) try: plugin = auth.KeystoneStrategy(no_region_creds) plugin.authenticate() self.fail("Failed to raise RegionAmbiguity when no region present " "and multiple regions exist: %r" % no_region_creds) except exception.RegionAmbiguity: pass # Expected wrong_region_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'strategy': 'keystone', 'region': 'NonExistentRegion' } try: plugin = auth.KeystoneStrategy(wrong_region_creds) 
plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when supplying " "wrong region: %r" % wrong_region_creds) except exception.NoServiceEndpoint: pass # Expected no_strategy_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(no_strategy_creds) plugin.authenticate() self.fail("Failed to raise MissingCredentialError when " "supplying no strategy: %r" % no_strategy_creds) except exception.MissingCredentialError: pass # Expected bad_strategy_creds = { 'username': 'user1', 'tenant': 'tenant-ok', 'auth_url': 'http://localhost/redirect/v2.0/', 'password': 'pass', 'region': 'RegionOne', 'strategy': 'keypebble' } try: plugin = auth.KeystoneStrategy(bad_strategy_creds) plugin.authenticate() self.fail("Failed to raise BadAuthStrategy when supplying " "bad auth strategy: %r" % bad_strategy_creds) except exception.BadAuthStrategy: pass # Expected mock_token = V2Token() mock_token.add_service('image', ['RegionOne', 'RegionTwo']) good_creds = [ { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # auth_url with trailing '/' { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' }, # auth_url without trailing '/' { 'username': 'user1', 'auth_url': 'http://localhost/v2.0', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionTwo' } # Second region ] for creds in good_creds: plugin = auth.KeystoneStrategy(creds) self.assertIsNone(plugin.authenticate()) self.assertEqual('http://localhost:9292', plugin.management_url) ambiguous_region_creds = { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' } mock_token = V2Token() # Add 
two identical services mock_token.add_service('image', ['RegionOne']) mock_token.add_service('image', ['RegionOne']) try: plugin = auth.KeystoneStrategy(ambiguous_region_creds) plugin.authenticate() self.fail("Failed to raise RegionAmbiguity when " "non-unique regions exist: %r" % ambiguous_region_creds) except exception.RegionAmbiguity: pass mock_token = V2Token() mock_token.add_service('bad-image', ['RegionOne']) good_creds = { 'username': 'user1', 'auth_url': 'http://localhost/v2.0/', 'password': 'pass', 'tenant': 'tenant-ok', 'strategy': 'keystone', 'region': 'RegionOne' } try: plugin = auth.KeystoneStrategy(good_creds) plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when bad service " "type encountered") except exception.NoServiceEndpoint: pass mock_token = V2Token() mock_token.add_service_no_type() try: plugin = auth.KeystoneStrategy(good_creds) plugin.authenticate() self.fail("Failed to raise NoServiceEndpoint when bad service " "type encountered") except exception.NoServiceEndpoint: pass try: plugin = auth.KeystoneStrategy(good_creds, configure_via_auth=False) plugin.authenticate() except exception.NoServiceEndpoint: self.fail("NoServiceEndpoint was raised when authenticate " "should not check for endpoint.") class TestEndpoints(utils.BaseTestCase): def setUp(self): super(TestEndpoints, self).setUp() self.service_catalog = [ { 'endpoint_links': [], 'endpoints': [ { 'adminURL': 'http://localhost:8080/', 'region': 'RegionOne', 'internalURL': 'http://internalURL/', 'publicURL': 'http://publicURL/', }, ], 'type': 'object-store', 'name': 'Object Storage Service', } ] def test_get_endpoint_with_custom_server_type(self): endpoint = auth.get_endpoint(self.service_catalog, service_type='object-store') self.assertEqual('http://publicURL/', endpoint) def test_get_endpoint_with_custom_endpoint_type(self): endpoint = auth.get_endpoint(self.service_catalog, service_type='object-store', endpoint_type='internalURL') 
self.assertEqual('http://internalURL/', endpoint) def test_get_endpoint_raises_with_invalid_service_type(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='foo') def test_get_endpoint_raises_with_invalid_endpoint_type(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='object-store', endpoint_type='foo') def test_get_endpoint_raises_with_invalid_endpoint_region(self): self.assertRaises(exception.NoServiceEndpoint, auth.get_endpoint, self.service_catalog, service_type='object-store', endpoint_region='foo', endpoint_type='internalURL') class TestImageMutability(utils.BaseTestCase): def setUp(self): super(TestImageMutability, self).setUp() self.image_factory = glance.domain.ImageFactory() def _is_mutable(self, tenant, owner, is_admin=False): context = glance.context.RequestContext(tenant=tenant, is_admin=is_admin) image = self.image_factory.new_image(owner=owner) return authorization.is_image_mutable(context, image) def test_admin_everything_mutable(self): self.assertTrue(self._is_mutable(None, None, is_admin=True)) self.assertTrue(self._is_mutable(None, TENANT1, is_admin=True)) self.assertTrue(self._is_mutable(TENANT1, None, is_admin=True)) self.assertTrue(self._is_mutable(TENANT1, TENANT1, is_admin=True)) self.assertTrue(self._is_mutable(TENANT1, TENANT2, is_admin=True)) def test_no_tenant_nothing_mutable(self): self.assertFalse(self._is_mutable(None, None)) self.assertFalse(self._is_mutable(None, TENANT1)) def test_regular_user(self): self.assertFalse(self._is_mutable(TENANT1, None)) self.assertFalse(self._is_mutable(TENANT1, TENANT2)) self.assertTrue(self._is_mutable(TENANT1, TENANT1)) class TestImmutableImage(utils.BaseTestCase): def setUp(self): super(TestImmutableImage, self).setUp() image_factory = glance.domain.ImageFactory() self.context = glance.context.RequestContext(tenant=TENANT1) image = image_factory.new_image( image_id=UUID1, 
name='Marvin', owner=TENANT1, disk_format='raw', container_format='bare', extra_properties={'foo': 'bar'}, tags=['ping', 'pong'], ) self.image = authorization.ImmutableImageProxy(image, self.context) def _test_change(self, attr, value): self.assertRaises(exception.Forbidden, setattr, self.image, attr, value) self.assertRaises(exception.Forbidden, delattr, self.image, attr) def test_change_id(self): self._test_change('image_id', UUID2) def test_change_name(self): self._test_change('name', 'Freddie') def test_change_owner(self): self._test_change('owner', TENANT2) def test_change_min_disk(self): self._test_change('min_disk', 100) def test_change_min_ram(self): self._test_change('min_ram', 1024) def test_change_disk_format(self): self._test_change('disk_format', 'vhd') def test_change_container_format(self): self._test_change('container_format', 'ova') def test_change_visibility(self): self._test_change('visibility', 'public') def test_change_status(self): self._test_change('status', 'active') def test_change_created_at(self): self._test_change('created_at', timeutils.utcnow()) def test_change_updated_at(self): self._test_change('updated_at', timeutils.utcnow()) def test_change_locations(self): self._test_change('locations', ['http://a/b/c']) self.assertRaises(exception.Forbidden, self.image.locations.append, 'http://a/b/c') self.assertRaises(exception.Forbidden, self.image.locations.extend, ['http://a/b/c']) self.assertRaises(exception.Forbidden, self.image.locations.insert, 'foo') self.assertRaises(exception.Forbidden, self.image.locations.pop) self.assertRaises(exception.Forbidden, self.image.locations.remove, 'foo') self.assertRaises(exception.Forbidden, self.image.locations.reverse) self.assertRaises(exception.Forbidden, self.image.locations.sort) self.assertRaises(exception.Forbidden, self.image.locations.__delitem__, 0) self.assertRaises(exception.Forbidden, self.image.locations.__delslice__, 0, 2) self.assertRaises(exception.Forbidden, 
self.image.locations.__setitem__, 0, 'foo') self.assertRaises(exception.Forbidden, self.image.locations.__setslice__, 0, 2, ['foo', 'bar']) self.assertRaises(exception.Forbidden, self.image.locations.__iadd__, 'foo') self.assertRaises(exception.Forbidden, self.image.locations.__imul__, 2) def test_change_size(self): self._test_change('size', 32) def test_change_tags(self): self.assertRaises(exception.Forbidden, delattr, self.image, 'tags') self.assertRaises(exception.Forbidden, setattr, self.image, 'tags', ['king', 'kong']) self.assertRaises(exception.Forbidden, self.image.tags.pop) self.assertRaises(exception.Forbidden, self.image.tags.clear) self.assertRaises(exception.Forbidden, self.image.tags.add, 'king') self.assertRaises(exception.Forbidden, self.image.tags.remove, 'ping') self.assertRaises(exception.Forbidden, self.image.tags.update, set(['king', 'kong'])) self.assertRaises(exception.Forbidden, self.image.tags.intersection_update, set([])) self.assertRaises(exception.Forbidden, self.image.tags.difference_update, set([])) self.assertRaises(exception.Forbidden, self.image.tags.symmetric_difference_update, set([])) def test_change_properties(self): self.assertRaises(exception.Forbidden, delattr, self.image, 'extra_properties') self.assertRaises(exception.Forbidden, setattr, self.image, 'extra_properties', {}) self.assertRaises(exception.Forbidden, self.image.extra_properties.__delitem__, 'foo') self.assertRaises(exception.Forbidden, self.image.extra_properties.__setitem__, 'foo', 'b') self.assertRaises(exception.Forbidden, self.image.extra_properties.__setitem__, 'z', 'j') self.assertRaises(exception.Forbidden, self.image.extra_properties.pop) self.assertRaises(exception.Forbidden, self.image.extra_properties.popitem) self.assertRaises(exception.Forbidden, self.image.extra_properties.setdefault, 'p', 'j') self.assertRaises(exception.Forbidden, self.image.extra_properties.update, {}) def test_delete(self): self.assertRaises(exception.Forbidden, 
self.image.delete) def test_set_data(self): self.assertRaises(exception.Forbidden, self.image.set_data, 'blah', 4) def test_deactivate_image(self): self.assertRaises(exception.Forbidden, self.image.deactivate) def test_reactivate_image(self): self.assertRaises(exception.Forbidden, self.image.reactivate) def test_get_data(self): class FakeImage(object): def get_data(self): return 'tiddlywinks' image = glance.api.authorization.ImmutableImageProxy( FakeImage(), self.context) self.assertEqual('tiddlywinks', image.get_data()) class TestImageFactoryProxy(utils.BaseTestCase): def setUp(self): super(TestImageFactoryProxy, self).setUp() factory = glance.domain.ImageFactory() self.context = glance.context.RequestContext(tenant=TENANT1) self.image_factory = authorization.ImageFactoryProxy(factory, self.context) def test_default_owner_is_set(self): image = self.image_factory.new_image() self.assertEqual(TENANT1, image.owner) def test_wrong_owner_cannot_be_set(self): self.assertRaises(exception.Forbidden, self.image_factory.new_image, owner=TENANT2) def test_cannot_set_owner_to_none(self): self.assertRaises(exception.Forbidden, self.image_factory.new_image, owner=None) def test_admin_can_set_any_owner(self): self.context.is_admin = True image = self.image_factory.new_image(owner=TENANT2) self.assertEqual(TENANT2, image.owner) def test_admin_can_set_owner_to_none(self): self.context.is_admin = True image = self.image_factory.new_image(owner=None) self.assertIsNone(image.owner) def test_admin_still_gets_default_tenant(self): self.context.is_admin = True image = self.image_factory.new_image() self.assertEqual(TENANT1, image.owner) class TestImageRepoProxy(utils.BaseTestCase): class ImageRepoStub(object): def __init__(self, fixtures): self.fixtures = fixtures def get(self, image_id): for f in self.fixtures: if f.image_id == image_id: return f else: raise ValueError(image_id) def list(self, *args, **kwargs): return self.fixtures def setUp(self): super(TestImageRepoProxy, 
self).setUp() image_factory = glance.domain.ImageFactory() self.fixtures = [ image_factory.new_image(owner=TENANT1), image_factory.new_image(owner=TENANT2, visibility='public'), image_factory.new_image(owner=TENANT2), ] self.context = glance.context.RequestContext(tenant=TENANT1) image_repo = self.ImageRepoStub(self.fixtures) self.image_repo = authorization.ImageRepoProxy(image_repo, self.context) def test_get_mutable_image(self): image = self.image_repo.get(self.fixtures[0].image_id) self.assertEqual(image.image_id, self.fixtures[0].image_id) def test_get_immutable_image(self): image = self.image_repo.get(self.fixtures[1].image_id) self.assertRaises(exception.Forbidden, setattr, image, 'name', 'Vince') def test_list(self): images = self.image_repo.list() self.assertEqual(images[0].image_id, self.fixtures[0].image_id) self.assertRaises(exception.Forbidden, setattr, images[1], 'name', 'Wally') self.assertRaises(exception.Forbidden, setattr, images[2], 'name', 'Calvin') class TestImmutableTask(utils.BaseTestCase): def setUp(self): super(TestImmutableTask, self).setUp() task_factory = glance.domain.TaskFactory() self.context = glance.context.RequestContext(tenant=TENANT2) task_type = 'import' owner = TENANT2 task = task_factory.new_task(task_type, owner) self.task = authorization.ImmutableTaskProxy(task) def _test_change(self, attr, value): self.assertRaises( exception.Forbidden, setattr, self.task, attr, value ) self.assertRaises( exception.Forbidden, delattr, self.task, attr ) def test_change_id(self): self._test_change('task_id', UUID2) def test_change_type(self): self._test_change('type', 'fake') def test_change_status(self): self._test_change('status', 'success') def test_change_owner(self): self._test_change('owner', 'fake') def test_change_expires_at(self): self._test_change('expires_at', 'fake') def test_change_created_at(self): self._test_change('created_at', 'fake') def test_change_updated_at(self): self._test_change('updated_at', 'fake') def 
test_begin_processing(self): self.assertRaises( exception.Forbidden, self.task.begin_processing ) def test_succeed(self): self.assertRaises( exception.Forbidden, self.task.succeed, 'result' ) def test_fail(self): self.assertRaises( exception.Forbidden, self.task.fail, 'message' ) class TestImmutableTaskStub(utils.BaseTestCase): def setUp(self): super(TestImmutableTaskStub, self).setUp() task_factory = glance.domain.TaskFactory() self.context = glance.context.RequestContext(tenant=TENANT2) task_type = 'import' owner = TENANT2 task = task_factory.new_task(task_type, owner) self.task = authorization.ImmutableTaskStubProxy(task) def _test_change(self, attr, value): self.assertRaises( exception.Forbidden, setattr, self.task, attr, value ) self.assertRaises( exception.Forbidden, delattr, self.task, attr ) def test_change_id(self): self._test_change('task_id', UUID2) def test_change_type(self): self._test_change('type', 'fake') def test_change_status(self): self._test_change('status', 'success') def test_change_owner(self): self._test_change('owner', 'fake') def test_change_expires_at(self): self._test_change('expires_at', 'fake') def test_change_created_at(self): self._test_change('created_at', 'fake') def test_change_updated_at(self): self._test_change('updated_at', 'fake') class TestTaskFactoryProxy(utils.BaseTestCase): def setUp(self): super(TestTaskFactoryProxy, self).setUp() factory = glance.domain.TaskFactory() self.context = glance.context.RequestContext(tenant=TENANT1) self.context_owner_is_none = glance.context.RequestContext() self.task_factory = authorization.TaskFactoryProxy( factory, self.context ) self.task_type = 'import' self.task_input = '{"loc": "fake"}' self.owner = 'foo' self.request1 = unittest_utils.get_fake_request(tenant=TENANT1) self.request2 = unittest_utils.get_fake_request(tenant=TENANT2) def test_task_create_default_owner(self): owner = self.request1.context.owner task = self.task_factory.new_task(task_type=self.task_type, owner=owner) 
self.assertEqual(TENANT1, task.owner) def test_task_create_wrong_owner(self): self.assertRaises(exception.Forbidden, self.task_factory.new_task, task_type=self.task_type, task_input=self.task_input, owner=self.owner) def test_task_create_owner_as_None(self): self.assertRaises(exception.Forbidden, self.task_factory.new_task, task_type=self.task_type, task_input=self.task_input, owner=None) def test_task_create_admin_context_owner_as_None(self): self.context.is_admin = True self.assertRaises(exception.Forbidden, self.task_factory.new_task, task_type=self.task_type, task_input=self.task_input, owner=None) class TestTaskRepoProxy(utils.BaseTestCase): class TaskRepoStub(object): def __init__(self, fixtures): self.fixtures = fixtures def get(self, task_id): for f in self.fixtures: if f.task_id == task_id: return f else: raise ValueError(task_id) class TaskStubRepoStub(object): def __init__(self, fixtures): self.fixtures = fixtures def list(self, *args, **kwargs): return self.fixtures def setUp(self): super(TestTaskRepoProxy, self).setUp() task_factory = glance.domain.TaskFactory() task_type = 'import' owner = None self.fixtures = [ task_factory.new_task(task_type, owner), task_factory.new_task(task_type, owner), task_factory.new_task(task_type, owner), ] self.context = glance.context.RequestContext(tenant=TENANT1) task_repo = self.TaskRepoStub(self.fixtures) task_stub_repo = self.TaskStubRepoStub(self.fixtures) self.task_repo = authorization.TaskRepoProxy( task_repo, self.context ) self.task_stub_repo = authorization.TaskStubRepoProxy( task_stub_repo, self.context ) def test_get_mutable_task(self): task = self.task_repo.get(self.fixtures[0].task_id) self.assertEqual(task.task_id, self.fixtures[0].task_id) def test_get_immutable_task(self): task_id = self.fixtures[1].task_id task = self.task_repo.get(task_id) self.assertRaises(exception.Forbidden, setattr, task, 'input', 'foo') def test_list(self): tasks = self.task_stub_repo.list() self.assertEqual(tasks[0].task_id, 
self.fixtures[0].task_id) self.assertRaises(exception.Forbidden, setattr, tasks[1], 'owner', 'foo') self.assertRaises(exception.Forbidden, setattr, tasks[2], 'owner', 'foo') glance-12.0.0/glance/tests/unit/v1/0000775000567000056710000000000012701407204020061 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/v1/test_upload_utils.py0000664000567000056710000003767612701407047024226 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from contextlib import contextmanager import glance_store import mock from mock import patch import webob.exc from glance.api.v1 import upload_utils from glance.common import exception from glance.common import store_utils from glance.common import utils import glance.registry.client.v1.api as registry from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils class TestUploadUtils(base.StoreClearingUnitTest): def setUp(self): super(TestUploadUtils, self).setUp() self.config(verbose=True, debug=True) def tearDown(self): super(TestUploadUtils, self).tearDown() def test_initiate_delete(self): req = unit_test_utils.get_fake_request() location = {"url": "file://foo/bar", "metadata": {}, "status": "active"} id = unit_test_utils.UUID1 with patch.object(store_utils, "safe_delete_from_backend") as mock_store_utils: upload_utils.initiate_deletion(req, location, id) mock_store_utils.assert_called_once_with(req.context, id, location) def test_initiate_delete_with_delayed_delete(self): self.config(delayed_delete=True) req = unit_test_utils.get_fake_request() location = {"url": "file://foo/bar", "metadata": {}, "status": "active"} id = unit_test_utils.UUID1 with patch.object(store_utils, "schedule_delayed_delete_from_backend", return_value=True) as mock_store_utils: upload_utils.initiate_deletion(req, location, id) mock_store_utils.assert_called_once_with(req.context, id, location) def test_safe_kill(self): req = unit_test_utils.get_fake_request() id = unit_test_utils.UUID1 with patch.object(registry, "update_image_metadata") as mock_registry: upload_utils.safe_kill(req, id, 'saving') mock_registry.assert_called_once_with(req.context, id, {'status': 'killed'}, from_state='saving') def test_safe_kill_with_error(self): req = unit_test_utils.get_fake_request() id = unit_test_utils.UUID1 with patch.object(registry, "update_image_metadata", side_effect=Exception()) as mock_registry: upload_utils.safe_kill(req, id, 'saving') 
mock_registry.assert_called_once_with(req.context, id, {'status': 'killed'}, from_state='saving') @contextmanager def _get_store_and_notifier(self, image_size=10, ext_update_data=None, ret_checksum="checksum", exc_class=None): location = "file://foo/bar" checksum = "checksum" size = 10 update_data = {'checksum': checksum} if ext_update_data is not None: update_data.update(ext_update_data) image_meta = {'id': unit_test_utils.UUID1, 'size': image_size} image_data = "blah" store = mock.MagicMock() notifier = mock.MagicMock() if exc_class is not None: store.add.side_effect = exc_class else: store.add.return_value = (location, size, ret_checksum, {}) yield (location, checksum, image_meta, image_data, store, notifier, update_data) def test_upload_data_to_store(self): # 'user_storage_quota' is not set def store_add(image_id, data, size, **kwargs): # Check if 'data' is instance of 'CooperativeReader' when # 'user_storage_quota' is disabled. self.assertIsInstance(data, utils.CooperativeReader) return location, 10, "checksum", {} req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}, exc_class=store_add) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: actual_meta, location_data = upload_utils.upload_data_to_store( req, image_meta, image_data, store, notifier) self.assertEqual(location, location_data['url']) self.assertEqual(image_meta.update(update_data), actual_meta) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') def test_upload_data_to_store_user_storage_quota_enabled(self): # Enable user_storage_quota self.config(user_storage_quota='100B') def store_add(image_id, data, size, **kwargs): # Check if 'data' is instance of 'LimitingReader' when # 'user_storage_quota' is enabled. 
self.assertIsInstance(data, utils.LimitingReader) return location, 10, "checksum", {} req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}, exc_class=store_add) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) # mock 'check_quota' mock_check_quota = patch('glance.api.common.check_quota', return_value=100) mock_check_quota.start() self.addCleanup(mock_check_quota.stop) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: actual_meta, location_data = upload_utils.upload_data_to_store( req, image_meta, image_data, store, notifier) self.assertEqual(location, location_data['url']) self.assertEqual(image_meta.update(update_data), actual_meta) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') # 'check_quota' is called two times check_quota_call_count = ( mock_check_quota.target.check_quota.call_count) self.assertEqual(2, check_quota_call_count) def test_upload_data_to_store_mismatch_size(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( image_size=11) as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 'update_image_metadata', return_value=ret) as mock_update_image_metadata: self.assertRaises(webob.exc.HTTPBadRequest, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_update_image_metadata.assert_called_with( req.context, image_meta['id'], {'status': 'killed'}, from_state='saving') def test_upload_data_to_store_mismatch_checksum(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ret_checksum='fake') as (location, checksum, image_meta, image_data, store, notifier, update_data): ret = image_meta.update(update_data) with patch.object(registry, 
"update_image_metadata", return_value=ret) as mock_update_image_metadata: self.assertRaises(webob.exc.HTTPBadRequest, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_update_image_metadata.assert_called_with( req.context, image_meta['id'], {'status': 'killed'}, from_state='saving') def _test_upload_data_to_store_exception(self, exc_class, expected_class): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( exc_class=exc_class) as (location, checksum, image_meta, image_data, store, notifier, update_data): with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: self.assertRaises(expected_class, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) mock_safe_kill.assert_called_once_with( req, image_meta['id'], 'saving') def _test_upload_data_to_store_exception_with_notify(self, exc_class, expected_class, image_killed=True): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( exc_class=exc_class) as (location, checksum, image_meta, image_data, store, notifier, update_data): with patch.object(upload_utils, 'safe_kill') as mock_safe_kill: self.assertRaises(expected_class, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) if image_killed: mock_safe_kill.assert_called_with(req, image_meta['id'], 'saving') def test_upload_data_to_store_raises_store_disabled(self): """Test StoreDisabled exception is raised while uploading data""" self._test_upload_data_to_store_exception_with_notify( glance_store.StoreAddDisabled, webob.exc.HTTPGone, image_killed=True) def test_upload_data_to_store_duplicate(self): """See note in glance.api.v1.upload_utils on why we don't want image to be deleted in this case. 
""" self._test_upload_data_to_store_exception_with_notify( exception.Duplicate, webob.exc.HTTPConflict, image_killed=False) def test_upload_data_to_store_forbidden(self): self._test_upload_data_to_store_exception_with_notify( exception.Forbidden, webob.exc.HTTPForbidden) def test_upload_data_to_store_storage_full(self): self._test_upload_data_to_store_exception_with_notify( glance_store.StorageFull, webob.exc.HTTPRequestEntityTooLarge) def test_upload_data_to_store_storage_write_denied(self): self._test_upload_data_to_store_exception_with_notify( glance_store.StorageWriteDenied, webob.exc.HTTPServiceUnavailable) def test_upload_data_to_store_size_limit_exceeded(self): self._test_upload_data_to_store_exception_with_notify( exception.ImageSizeLimitExceeded, webob.exc.HTTPRequestEntityTooLarge) def test_upload_data_to_store_http_error(self): self._test_upload_data_to_store_exception_with_notify( webob.exc.HTTPError, webob.exc.HTTPError) def test_upload_data_to_store_client_disconnect(self): self._test_upload_data_to_store_exception( ValueError, webob.exc.HTTPBadRequest) def test_upload_data_to_store_client_disconnect_ioerror(self): self._test_upload_data_to_store_exception( IOError, webob.exc.HTTPBadRequest) def test_upload_data_to_store_exception(self): self._test_upload_data_to_store_exception_with_notify( Exception, webob.exc.HTTPInternalServerError) def test_upload_data_to_store_not_found_after_upload(self): req = unit_test_utils.get_fake_request() with self._get_store_and_notifier( ext_update_data={'size': 10}) as (location, checksum, image_meta, image_data, store, notifier, update_data): exc = exception.ImageNotFound with patch.object(registry, 'update_image_metadata', side_effect=exc) as mock_update_image_metadata: with patch.object(upload_utils, "initiate_deletion") as mock_initiate_del: with patch.object(upload_utils, "safe_kill") as mock_safe_kill: self.assertRaises(webob.exc.HTTPPreconditionFailed, upload_utils.upload_data_to_store, req, image_meta, 
image_data, store, notifier) mock_update_image_metadata.assert_called_once_with( req.context, image_meta['id'], update_data, from_state='saving') mock_initiate_del.assert_called_once_with( req, {'url': location, 'status': 'active', 'metadata': {}}, image_meta['id']) mock_safe_kill.assert_called_once_with( req, image_meta['id'], 'saving') @mock.patch.object(registry, 'update_image_metadata', side_effect=exception.NotAuthenticated) @mock.patch.object(upload_utils, 'initiate_deletion') def test_activate_image_with_expired_token( self, mocked_delete, mocked_update): """Test token expiration during image upload. If users token expired before image was uploaded then if auth error was caught from registry during changing image status from 'saving' to 'active' then it's required to delete all image data. """ context = mock.Mock() req = mock.Mock() req.context = context with self._get_store_and_notifier() as (location, checksum, image_meta, image_data, store, notifier, update_data): self.assertRaises(webob.exc.HTTPUnauthorized, upload_utils.upload_data_to_store, req, image_meta, image_data, store, notifier) self.assertEqual(2, mocked_update.call_count) mocked_delete.assert_called_once_with( req, {'url': 'file://foo/bar', 'status': 'active', 'metadata': {}}, 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d') glance-12.0.0/glance/tests/unit/v1/__init__.py0000664000567000056710000000000012701407047022165 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/unit/v1/test_api.py0000664000567000056710000061236212701407047022262 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import hashlib import os import signal import uuid import glance_store as store import mock from oslo_config import cfg from oslo_serialization import jsonutils import routes import six import webob import glance.api import glance.api.common from glance.api.v1 import router from glance.api.v1 import upload_utils import glance.common.config from glance.common import exception from glance.common import timeutils import glance.context from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import models as db_models import glance.registry.client.v1.api as registry from glance.tests.unit import base import glance.tests.unit.utils as unit_test_utils from glance.tests import utils as test_utils CONF = cfg.CONF _gen_uuid = lambda: str(uuid.uuid4()) UUID1 = _gen_uuid() UUID2 = _gen_uuid() UUID3 = _gen_uuid() class TestGlanceAPI(base.IsolatedUnitTest): def setUp(self): """Establish a clean test environment""" super(TestGlanceAPI, self).setUp() self.mapper = routes.Mapper() self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper)) self.FIXTURES = [ {'id': UUID1, 'name': 'fake image #1', 'status': 'active', 'disk_format': 'ami', 'container_format': 'ami', 'is_public': False, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'deleted_at': None, 'deleted': False, 'checksum': None, 'size': 13, 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), 'metadata': {}, 'status': 'active'}], 'properties': {'type': 'kernel'}}, {'id': UUID2, 'name': 'fake image #2', 'status': 'active', 'disk_format': 'vhd', 
'container_format': 'ovf', 'is_public': True, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'deleted_at': None, 'deleted': False, 'checksum': 'abc123', 'size': 19, 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2), 'metadata': {}, 'status': 'active'}], 'properties': {}}, {'id': UUID3, 'name': 'fake image #3', 'status': 'deactivated', 'disk_format': 'ami', 'container_format': 'ami', 'is_public': False, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'deleted_at': None, 'deleted': False, 'checksum': '13', 'size': 13, 'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1), 'metadata': {}, 'status': 'active'}], 'properties': {}}] self.context = glance.context.RequestContext(is_admin=True) db_api.get_engine() self.destroy_fixtures() self.addCleanup(self.destroy_fixtures) self.create_fixtures() # Used to store/track image status changes for post-analysis self.image_status = [] self.http_server_pid = None self.addCleanup(self._cleanup_server) ret = test_utils.start_http_server("foo_image_id", b"foo_image") self.http_server_pid, self.http_port = ret def _cleanup_server(self): if self.http_server_pid is not None: os.kill(self.http_server_pid, signal.SIGKILL) def create_fixtures(self): for fixture in self.FIXTURES: db_api.image_create(self.context, fixture) # We write a fake image file to the filesystem with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image: image.write(b"chunk00000remainder") image.flush() def destroy_fixtures(self): # Easiest to just drop the models and re-create them... 
db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def _do_test_defaulted_format(self, format_key, format_value): fixture_headers = {'x-image-meta-name': 'defaulted', 'x-image-meta-location': 'http://localhost:0/image', format_key: format_value} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual(format_value, res_body['disk_format']) self.assertEqual(format_value, res_body['container_format']) def _http_loc_url(self, path): return 'http://127.0.0.1:%d%s' % (self.http_port, path) def test_defaulted_amazon_format(self): for key in ('x-image-meta-disk-format', 'x-image-meta-container-format'): for value in ('aki', 'ari', 'ami'): self._do_test_defaulted_format(key, value) def test_bad_time_create_minus_int(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-created_at': '-42', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_time_create_string(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-created_at': 'foo', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_time_create_low_year(self): # 'strftime' only allows values after 1900 in 
glance v1 fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-created_at': '1100', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_time_create_string_in_date(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-created_at': '2012-01-01hey12:32:12', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_min_disk_size_create(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-min-disk': '-42', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid value', res.body) def test_updating_imageid_after_creation(self): # Test incorrect/illegal id update req = webob.Request.blank("/images/%s" % UUID1) req.method = 'PUT' req.headers['x-image-meta-id'] = '000000-000-0000-0000-000' res = req.get_response(self.api) self.assertEqual(403, res.status_int) # Test using id of another image req = webob.Request.blank("/images/%s" % UUID1) req.method = 'PUT' req.headers['x-image-meta-id'] = UUID2 res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_bad_min_disk_size_update(self): fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = 
webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-min-disk'] = '-42' res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid value', res.body) def test_invalid_min_disk_size_update(self): fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-min-disk'] = str(2 ** 31 + 1) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_min_ram_size_create(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-min-ram': '-42', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid value', res.body) def test_bad_min_ram_size_update(self): fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = 
req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-min-ram'] = '-42' res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid value', res.body) def test_invalid_min_ram_size_update(self): fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-min-ram'] = str(2 ** 31 + 1) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_bad_disk_format(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'invalid', 'x-image-meta-container-format': 'ami', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid disk format', res.body) def test_configured_disk_format_good(self): self.config(disk_formats=['foo'], group="image_format") fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'foo', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = 
store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) def test_configured_disk_format_bad(self): self.config(disk_formats=['foo'], group="image_format") fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'bar', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid disk format', res.body) def test_configured_container_format_good(self): self.config(container_formats=['foo'], group="image_format") fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'raw', 'x-image-meta-container-format': 'foo', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) def test_configured_container_format_bad(self): self.config(container_formats=['foo'], group="image_format") fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'raw', 'x-image-meta-container-format': 'bar', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid container format', res.body) def test_container_and_disk_amazon_format_differs(self): 
fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'aki', 'x-image-meta-container-format': 'ami'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) expected = (b"Invalid mix of disk and container formats. " b"When setting a disk or container format to one of " b"'aki', 'ari', or 'ami', " b"the container and disk formats must match.") self.assertEqual(400, res.status_int) self.assertIn(expected, res.body) def test_create_with_location_no_container_format(self): fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'vhd', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Container format is not specified', res.body) def test_create_with_location_no_disk_format(self): fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Disk format is not specified', res.body) def test_create_with_empty_location(self): fixture_headers = { 'x-image-meta-location': '', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): 
req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_with_empty_copy_from(self): fixture_headers = { 'x-glance-api-copy-from': '', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_delayed_image_with_no_disk_and_container_formats(self): fixture_headers = { 'x-image-meta-name': 'delayed', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) def test_create_with_bad_store_name(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-disk-format': 'qcow2', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Required store bad is invalid', res.body) @mock.patch.object(glance.api.v1.images.Controller, '_external_source') @mock.patch.object(store, 'get_store_from_location') def test_create_with_location_get_store_or_400_raises_exception( self, mock_get_store_from_location, mock_external_source): location = 'bad+scheme://localhost:0/image.qcow2' scheme = 'bad+scheme' fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': location, 'x-image-meta-disk-format': 'qcow2', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v mock_external_source.return_value = location mock_get_store_from_location.return_value = scheme res = 
req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertEqual(1, mock_external_source.call_count) self.assertEqual(1, mock_get_store_from_location.call_count) self.assertIn('Store for scheme %s not found' % scheme, res.body.decode('utf-8')) def test_create_with_location_unknown_scheme(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'bad+scheme://localhost:0/image.qcow2', 'x-image-meta-disk-format': 'qcow2', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'External sources are not supported', res.body) def test_create_with_location_bad_store_uri(self): fixture_headers = { 'x-image-meta-store': 'file', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://', 'x-image-meta-disk-format': 'qcow2', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid location', res.body) def test_create_image_with_too_many_properties(self): self.config(image_property_quota=1) another_request = unit_test_utils.get_fake_request( path='/images', method='POST') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-image-meta-property-x_all_permitted': '1', 'x-image-meta-property-x_all_permitted_foo': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(413, output.status_int) def test_bad_container_format(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': 'http://localhost:0/image.tar.gz', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 
'invalid', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid container format', res.body) def test_bad_image_size(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'bogus', 'x-image-meta-location': self._http_loc_url('/image.tar.gz'), 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'bare', } def exec_bad_size_test(bad_size, expected_substr): fixture_headers['x-image-meta-size'] = bad_size req = webob.Request.blank("/images", method='POST', headers=fixture_headers) res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(expected_substr, res.body) expected = b"Cannot convert image size 'invalid' to an integer." exec_bad_size_test('invalid', expected) expected = b"Cannot be a negative value." exec_bad_size_test(-10, expected) def test_bad_image_name(self): fixture_headers = { 'x-image-meta-store': 'bad', 'x-image-meta-name': 'X' * 256, 'x-image-meta-location': self._http_loc_url('/image.tar.gz'), 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_no_location_no_image_as_body(self): """Tests creates a queued image for no body and no loc header""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-created_at': '2015-11-20', 'x-image-updated_at': '2015-12-01 12:10:01', 'x-image-deleted_at': '2000'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, 
res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] # Test that we are able to edit the Location field # per LP Bug #911599 req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-location'] = 'http://localhost:0/images/123' http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mocked_size: mocked_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_body = jsonutils.loads(res.body)['image'] # Once the location is set, the image should be activated # see LP Bug #939484 self.assertEqual('active', res_body['status']) self.assertNotIn('location', res_body) # location never shown def test_add_image_no_location_no_content_type(self): """Tests creates a queued image for no body and no loc header""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' req.body = b"chunk00000remainder" for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_size_header_too_big(self): """Tests raises BadRequest for supplied image size that is too big""" fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1, 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_size_chunked_data_too_big(self): self.config(image_size_cap=512) fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'ami', 'x-image-meta-disk_format': 'ami', 'transfer-encoding': 'chunked', 'content-type': 'application/octet-stream', } req = 
webob.Request.blank("/images") req.method = 'POST' req.body_file = six.StringIO('X' * (CONF.image_size_cap + 1)) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(413, res.status_int) def test_add_image_size_data_too_big(self): self.config(image_size_cap=512) fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'ami', 'x-image-meta-disk_format': 'ami', 'content-type': 'application/octet-stream', } req = webob.Request.blank("/images") req.method = 'POST' req.body = b'X' * (CONF.image_size_cap + 1) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_size_header_exceed_quota(self): quota = 500 self.config(user_storage_quota=str(quota)) fixture_headers = {'x-image-meta-size': quota + 1, 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'bare', 'x-image-meta-disk_format': 'qcow2', 'content-type': 'application/octet-stream', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.body = b'X' * (quota + 1) res = req.get_response(self.api) self.assertEqual(413, res.status_int) def test_add_image_size_data_exceed_quota(self): quota = 500 self.config(user_storage_quota=str(quota)) fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'bare', 'x-image-meta-disk_format': 'qcow2', 'content-type': 'application/octet-stream', } req = webob.Request.blank("/images") req.method = 'POST' req.body = b'X' * (quota + 1) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(413, res.status_int) def test_add_image_size_data_exceed_quota_readd(self): quota = 500 self.config(user_storage_quota=str(quota)) fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'bare', 
'x-image-meta-disk_format': 'qcow2', 'content-type': 'application/octet-stream', } req = webob.Request.blank("/images") req.method = 'POST' req.body = b'X' * (quota + 1) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(413, res.status_int) used_size = sum([f['size'] for f in self.FIXTURES]) req = webob.Request.blank("/images") req.method = 'POST' req.body = b'X' * (quota - used_size) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) def _add_check_no_url_info(self): fixture_headers = {'x-image-meta-disk-format': 'ami', 'x-image-meta-container-format': 'ami', 'x-image-meta-size': '0', 'x-image-meta-name': 'empty image'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) res_body = jsonutils.loads(res.body)['image'] self.assertNotIn('locations', res_body) self.assertNotIn('direct_url', res_body) image_id = res_body['id'] # HEAD empty image req = webob.Request.blank("/images/%s" % image_id) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertNotIn('x-image-meta-locations', res.headers) self.assertNotIn('x-image-meta-direct_url', res.headers) def test_add_check_no_url_info_ml(self): self.config(show_multiple_locations=True) self._add_check_no_url_info() def test_add_check_no_url_info_direct_url(self): self.config(show_image_direct_url=True) self._add_check_no_url_info() def test_add_check_no_url_info_both_on(self): self.config(show_image_direct_url=True) self.config(show_multiple_locations=True) self._add_check_no_url_info() def test_add_check_no_url_info_both_off(self): self._add_check_no_url_info() def test_add_image_zero_size(self): """Tests creating an active image with explicitly zero size""" fixture_headers = {'x-image-meta-disk-format': 'ami', 
'x-image-meta-container-format': 'ami', 'x-image-meta-size': '0', 'x-image-meta-name': 'empty image'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('active', res_body['status']) image_id = res_body['id'] # GET empty image req = webob.Request.blank("/images/%s" % image_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(0, len(res.body)) def _do_test_add_image_attribute_mismatch(self, attributes): fixture_headers = { 'x-image-meta-name': 'fake image #3', } fixture_headers.update(attributes) req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"XXXX" res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_checksum_mismatch(self): attributes = { 'x-image-meta-checksum': 'asdf', } self._do_test_add_image_attribute_mismatch(attributes) def test_add_image_size_mismatch(self): attributes = { 'x-image-meta-size': str(len("XXXX") + 1), } self._do_test_add_image_attribute_mismatch(attributes) def test_add_image_checksum_and_size_mismatch(self): attributes = { 'x-image-meta-checksum': 'asdf', 'x-image-meta-size': str(len("XXXX") + 1), } self._do_test_add_image_attribute_mismatch(attributes) def test_add_image_bad_store(self): """Tests raises BadRequest for invalid store header""" fixture_headers = {'x-image-meta-store': 'bad', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(400, res.status_int) def 
test_add_image_basic_file_store(self): """Tests to add a basic image in the file store""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(201, res.status_int) # Test that the Location: header is set to the URI to # edit the newly-created image, as required by APP. # See LP Bug #719825 self.assertIn('location', res.headers, "'location' not in response headers.\n" "res.headerlist = %r" % res.headerlist) res_body = jsonutils.loads(res.body)['image'] self.assertIn('/images/%s' % res_body['id'], res.headers['location']) self.assertEqual('active', res_body['status']) image_id = res_body['id'] # Test that we are NOT able to edit the Location field # per LP Bug #911599 req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' url = self._http_loc_url('/images/123') req.headers['x-image-meta-location'] = url res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_image_unauthorized(self): rules = {"add_image": '!'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_add_publicize_image_unauthorized(self): rules = {"add_image": '@', "modify_image": '@', "publicize_image": '!'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-store': 
'file', 'x-image-meta-is-public': 'true', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_add_publicize_image_authorized(self): rules = {"add_image": '@', "modify_image": '@', "publicize_image": '@', "upload_image": '@'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-is-public': 'true', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(201, res.status_int) def test_add_copy_from_image_unauthorized(self): rules = {"add_image": '@', "copy_from": '!'} self.set_policy_rules(rules) url = self._http_loc_url('/i.ovf') fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-glance-api-copy-from': url, 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_add_copy_from_upload_image_unauthorized(self): rules = {"add_image": '@', "copy_from": '@', "upload_image": '!'} self.set_policy_rules(rules) url = self._http_loc_url('/i.ovf') fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 
'x-glance-api-copy-from': url, 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_add_copy_from_image_authorized_upload_image_authorized(self): rules = {"add_image": '@', "copy_from": '@', "upload_image": '@'} self.set_policy_rules(rules) url = self._http_loc_url('/i.ovf') fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-glance-api-copy-from': url, 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as mock_size: mock_size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) def test_upload_image_http_nonexistent_location_url(self): # Ensure HTTP 404 response returned when try to upload # image from non-existent http location URL. 
rules = {"add_image": '@', "copy_from": '@', "upload_image": '@'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-glance-api-copy-from': self._http_loc_url('/non_existing_image_path'), 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_add_copy_from_with_nonempty_body(self): """Tests creates an image from copy-from and nonempty body""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-glance-api-copy-from': 'http://a/b/c.ovf', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' req.body = b"chunk00000remainder" for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_location_with_nonempty_body(self): """Tests creates an image from location and nonempty body""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-location': 'http://a/b/c.tar.gz', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' req.body = b"chunk00000remainder" for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_location_with_conflict_image_size(self): """Tests creates an image from location and conflict image size""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 
'x-image-meta-location': 'http://a/b/c.tar.gz', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F', 'x-image-meta-size': '1'} req = webob.Request.blank("/images") req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as size: size.return_value = 2 for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(409, res.status_int) def test_add_location_with_invalid_location_on_conflict_image_size(self): """Tests creates an image from location and conflict image size""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-location': 'http://a/b/c.tar.gz', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F', 'x-image-meta-size': '1'} req = webob.Request.blank("/images") req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_location_with_invalid_location_on_restricted_sources(self): """Tests creates an image from location and restricted sources""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-location': 'file:///etc/passwd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-location': 'swift+config://xxx', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} req = webob.Request.blank("/images") 
req.headers['Content-Type'] = 'application/octet-stream' req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_create_image_with_nonexistent_location_url(self): # Ensure HTTP 404 response returned when try to create # image with non-existent http location URL. fixture_headers = { 'x-image-meta-name': 'bogus', 'x-image-meta-location': self._http_loc_url('/non_existing_image_path'), 'x-image-meta-disk-format': 'qcow2', 'x-image-meta-container-format': 'bare', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_add_copy_from_with_location(self): """Tests creates an image from copy-from and location""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-glance-api-copy-from': 'http://a/b/c.ovf', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F', 'x-image-meta-location': 'http://a/b/c.tar.gz'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_add_copy_from_with_restricted_sources(self): """Tests creates an image from copy-from with restricted sources""" header_template = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #F'} schemas = ["file:///etc/passwd", "swift+config:///xxx", "filesystem:///etc/passwd"] for schema in schemas: req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(header_template): req.headers[k] = v req.headers['x-glance-api-copy-from'] = schema res = req.get_response(self.api) self.assertEqual(400, res.status_int) def 
test_add_copy_from_upload_image_unauthorized_with_body(self): rules = {"upload_image": '!', "modify_image": '@', "add_image": '@'} self.set_policy_rules(rules) self.config(image_size_cap=512) fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'ami', 'x-image-meta-disk_format': 'ami', 'transfer-encoding': 'chunked', 'content-type': 'application/octet-stream', } req = webob.Request.blank("/images") req.method = 'POST' req.body_file = six.StringIO('X' * (CONF.image_size_cap)) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_update_data_upload_bad_store_uri(self): fixture_headers = {'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['x-image-disk-format'] = 'vhd' req.headers['x-image-container-format'] = 'ovf' req.headers['x-image-meta-location'] = 'http://' res = req.get_response(self.api) self.assertEqual(400, res.status_int) self.assertIn(b'Invalid location', res.body) def test_update_data_upload_image_unauthorized(self): rules = {"upload_image": '!', "modify_image": '@', "add_image": '@'} self.set_policy_rules(rules) """Tests creates a queued image for no body and no loc header""" self.config(image_size_cap=512) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = 
jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['transfer-encoding'] = 'chunked' req.headers['x-image-disk-format'] = 'vhd' req.headers['x-image-container-format'] = 'ovf' req.body_file = six.StringIO('X' * (CONF.image_size_cap)) res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_update_copy_from_upload_image_unauthorized(self): rules = {"upload_image": '!', "modify_image": '@', "add_image": '@', "copy_from": '@'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['x-glance-api-copy-from'] = self._http_loc_url('/i.ovf') res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_update_copy_from_unauthorized(self): rules = {"upload_image": '@', "modify_image": '@', "add_image": '@', "copy_from": '!'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = 
webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['x-glance-api-copy-from'] = self._http_loc_url('/i.ovf') res = req.get_response(self.api) self.assertEqual(403, res.status_int) def _do_test_post_image_content_missing_format(self, missing): """Tests creation of an image with missing format""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} header = 'x-image-meta-' + missing.replace('_', '-') del fixture_headers[header] req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_post_image_content_missing_disk_format(self): """Tests creation of an image with missing disk format""" self._do_test_post_image_content_missing_format('disk_format') def test_post_image_content_missing_container_type(self): """Tests creation of an image with missing container format""" self._do_test_post_image_content_missing_format('container_format') def _do_test_put_image_content_missing_format(self, missing): """Tests delayed activation of an image with missing format""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} header = 'x-image-meta-' + missing.replace('_', '-') del fixture_headers[header] req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) image_id = res_body['id'] req = webob.Request.blank("/images/%s" % image_id) 
req.method = 'PUT' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_put_image_content_missing_disk_format(self): """Tests delayed activation of image with missing disk format""" self._do_test_put_image_content_missing_format('disk_format') def test_put_image_content_missing_container_type(self): """Tests delayed activation of image with missing container format""" self._do_test_put_image_content_missing_format('container_format') def test_download_deactivated_images(self): """Tests exception raised trying to download a deactivated image""" req = webob.Request.blank("/images/%s" % UUID3) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_update_deleted_image(self): """Tests that exception raised trying to update a deleted image""" req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) fixture = {'name': 'test_del_img'} req = webob.Request.blank('/images/%s' % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image=fixture)) res = req.get_response(self.api) self.assertEqual(403, res.status_int) self.assertIn(b'Forbidden to update deleted image', res.body) def test_delete_deleted_image(self): """Tests that exception raised trying to delete a deleted image""" req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify the status is 'deleted' req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual("deleted", res.headers['x-image-meta-status']) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = 
req.get_response(self.api) self.assertEqual(404, res.status_int) msg = "Image %s not found." % UUID2 self.assertIn(msg, res.body.decode()) # Verify the status is still 'deleted' req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual("deleted", res.headers['x-image-meta-status']) def test_image_status_when_delete_fails(self): """ Tests that the image status set to active if deletion of image fails. """ fs = store.get_store_from_scheme('file') with mock.patch.object(fs, 'delete') as mock_fsstore_delete: mock_fsstore_delete.side_effect = exception.Forbidden() # trigger the v1 delete api req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(403, res.status_int) self.assertIn(b'Forbidden to delete image', res.body) # check image metadata is still there with active state req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual("active", res.headers['x-image-meta-status']) def test_delete_pending_delete_image(self): """ Tests that correct response returned when deleting a pending_delete image """ # First deletion self.config(delayed_delete=True) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify the status is 'pending_delete' req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual("pending_delete", res.headers['x-image-meta-status']) # Second deletion req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(403, res.status_int) self.assertIn(b'Forbidden to delete a pending_delete image', res.body) # Verify the status is still 'pending_delete' req = 
webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual("pending_delete", res.headers['x-image-meta-status']) def test_upload_to_image_status_saving(self): """Test image upload conflict. If an image is uploaded before an existing upload to the same image completes, the original upload should succeed and the conflicting one should fail and any data be deleted. """ fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'some-foo-image'} # create an image but don't upload yet. req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] image_id = res_body['id'] self.assertIn('/images/%s' % image_id, res.headers['location']) # verify the status is 'queued' self.assertEqual('queued', res_body['status']) orig_get_image_metadata = registry.get_image_metadata orig_image_get = db_api._image_get orig_image_update = db_api._image_update orig_initiate_deletion = upload_utils.initiate_deletion # this will be used to track what is called and their order. call_sequence = [] # use this to determine if we are within a db session i.e. atomic # operation, that is setting our active state. # We want first status check to be 'queued' so we get past the # first guard. test_status = { 'activate_session_started': False, 'queued_guard_passed': False } state_changes = [] def mock_image_update(context, values, image_id, purge_props=False, from_state=None): status = values.get('status') if status: state_changes.append(status) if status == 'active': # We only expect this state to be entered once. 
if test_status['activate_session_started']: raise Exception("target session already started") test_status['activate_session_started'] = True call_sequence.append('update_active') else: call_sequence.append('update') return orig_image_update(context, values, image_id, purge_props=purge_props, from_state=from_state) def mock_image_get(*args, **kwargs): """Force status to 'saving' if not within activate db session. If we are in the activate db session we return 'active' which we then expect to cause exception.Conflict to be raised since this indicates that another upload has succeeded. """ image = orig_image_get(*args, **kwargs) if test_status['activate_session_started']: call_sequence.append('image_get_active') setattr(image, 'status', 'active') else: setattr(image, 'status', 'saving') return image def mock_get_image_metadata(*args, **kwargs): """Force image status sequence. """ call_sequence.append('get_image_meta') meta = orig_get_image_metadata(*args, **kwargs) if not test_status['queued_guard_passed']: meta['status'] = 'queued' test_status['queued_guard_passed'] = True return meta def mock_initiate_deletion(*args, **kwargs): call_sequence.append('init_del') orig_initiate_deletion(*args, **kwargs) req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" with mock.patch.object( upload_utils, 'initiate_deletion') as mock_init_del: mock_init_del.side_effect = mock_initiate_deletion with mock.patch.object( registry, 'get_image_metadata') as mock_get_meta: mock_get_meta.side_effect = mock_get_image_metadata with mock.patch.object(db_api, '_image_get') as mock_db_get: mock_db_get.side_effect = mock_image_get with mock.patch.object( db_api, '_image_update') as mock_db_update: mock_db_update.side_effect = mock_image_update # Expect a 409 Conflict. 
res = req.get_response(self.api) self.assertEqual(409, res.status_int) # Check expected call sequence self.assertEqual(['get_image_meta', 'get_image_meta', 'update', 'update_active', 'image_get_active', 'init_del'], call_sequence) self.assertTrue(mock_get_meta.called) self.assertTrue(mock_db_get.called) self.assertTrue(mock_db_update.called) # Ensure cleanup occurred. self.assertEqual(1, mock_init_del.call_count) self.assertEqual(['saving', 'active'], state_changes) def test_register_and_upload(self): """ Test that the process of registering an image with some metadata, then uploading an image file with some more metadata doesn't mark the original metadata deleted :see LP Bug#901534 """ fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-property-key1': 'value1'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertIn('id', res_body) image_id = res_body['id'] self.assertIn('/images/%s' % image_id, res.headers['location']) # Verify the status is queued self.assertIn('status', res_body) self.assertEqual('queued', res_body['status']) # Check properties are not deleted self.assertIn('properties', res_body) self.assertIn('key1', res_body['properties']) self.assertEqual('value1', res_body['properties']['key1']) # Now upload the image file along with some more # metadata and verify original metadata properties # are not marked deleted req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['x-image-meta-property-key2'] = 'value2' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify the status is 'queued' req = 
webob.Request.blank("/images/%s" % image_id) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertIn('x-image-meta-property-key1', res.headers, "Did not find required property in headers. " "Got headers: %r" % res.headers) self.assertEqual("active", res.headers['x-image-meta-status']) def test_upload_image_raises_store_disabled(self): """Test that uploading an image file returns HTTTP 410 response""" # create image fs = store.get_store_from_scheme('file') fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-property-key1': 'value1'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertIn('id', res_body) image_id = res_body['id'] self.assertIn('/images/%s' % image_id, res.headers['location']) # Verify the status is queued self.assertIn('status', res_body) self.assertEqual('queued', res_body['status']) # Now upload the image file with mock.patch.object(fs, 'add') as mock_fsstore_add: mock_fsstore_add.side_effect = store.StoreAddDisabled req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(410, res.status_int) self._verify_image_status(image_id, 'killed') def _get_image_status(self, image_id): req = webob.Request.blank("/images/%s" % image_id) req.method = 'HEAD' return req.get_response(self.api) def _verify_image_status(self, image_id, status, check_deleted=False, use_cached=False): if not use_cached: res = self._get_image_status(image_id) else: res = self.image_status.pop(0) self.assertEqual(200, res.status_int) self.assertEqual(status, 
res.headers['x-image-meta-status']) self.assertEqual(str(check_deleted), res.headers['x-image-meta-deleted']) def _upload_safe_kill_common(self, mocks): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-property-key1': 'value1'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertIn('id', res_body) self.image_id = res_body['id'] self.assertIn('/images/%s' % self.image_id, res.headers['location']) # Verify the status is 'queued' self.assertEqual('queued', res_body['status']) for m in mocks: m['mock'].side_effect = m['side_effect'] # Now upload the image file along with some more metadata and # verify original metadata properties are not marked deleted req = webob.Request.blank("/images/%s" % self.image_id) req.method = 'PUT' req.headers['Content-Type'] = 'application/octet-stream' req.headers['x-image-meta-property-key2'] = 'value2' req.body = b"chunk00000remainder" res = req.get_response(self.api) # We expect 500 since an exception occurred during upload. self.assertEqual(500, res.status_int) @mock.patch('glance_store.store_add_to_backend') def test_upload_safe_kill(self, mock_store_add_to_backend): def mock_store_add_to_backend_w_exception(*args, **kwargs): """Trigger mid-upload failure by raising an exception.""" self.image_status.append(self._get_image_status(self.image_id)) # Raise an exception to emulate failed upload. 
raise Exception("== UNIT TEST UPLOAD EXCEPTION ==") mocks = [{'mock': mock_store_add_to_backend, 'side_effect': mock_store_add_to_backend_w_exception}] self._upload_safe_kill_common(mocks) # Check we went from 'saving' -> 'killed' self._verify_image_status(self.image_id, 'saving', use_cached=True) self._verify_image_status(self.image_id, 'killed') self.assertEqual(1, mock_store_add_to_backend.call_count) @mock.patch('glance_store.store_add_to_backend') def test_upload_safe_kill_deleted(self, mock_store_add_to_backend): test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router_api, is_admin=True) def mock_store_add_to_backend_w_exception(*args, **kwargs): """We now delete the image, assert status is 'deleted' then raise an exception to emulate a failed upload. This will be caught by upload_data_to_store() which will then try to set status to 'killed' which will be ignored since the image has been deleted. """ # expect 'saving' self.image_status.append(self._get_image_status(self.image_id)) req = webob.Request.blank("/images/%s" % self.image_id) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # expect 'deleted' self.image_status.append(self._get_image_status(self.image_id)) # Raise an exception to make the upload fail. 
raise Exception("== UNIT TEST UPLOAD EXCEPTION ==") mocks = [{'mock': mock_store_add_to_backend, 'side_effect': mock_store_add_to_backend_w_exception}] self._upload_safe_kill_common(mocks) # Check we went from 'saving' -> 'deleted' -> 'deleted' self._verify_image_status(self.image_id, 'saving', check_deleted=False, use_cached=True) self._verify_image_status(self.image_id, 'deleted', check_deleted=True, use_cached=True) self._verify_image_status(self.image_id, 'deleted', check_deleted=True) self.assertEqual(1, mock_store_add_to_backend.call_count) def _check_delete_during_image_upload(self, is_admin=False): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-property-key1': 'value1'} req = unit_test_utils.get_fake_request(path="/images", is_admin=is_admin) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertIn('id', res_body) image_id = res_body['id'] self.assertIn('/images/%s' % image_id, res.headers['location']) # Verify the status is 'queued' self.assertEqual('queued', res_body['status']) called = {'initiate_deletion': False} def mock_initiate_deletion(*args, **kwargs): called['initiate_deletion'] = True self.stubs.Set(glance.api.v1.upload_utils, 'initiate_deletion', mock_initiate_deletion) orig_update_image_metadata = registry.update_image_metadata data = b"somedata" def mock_update_image_metadata(*args, **kwargs): if args[2].get('size', None) == len(data): path = "/images/%s" % image_id req = unit_test_utils.get_fake_request(path=path, method='DELETE', is_admin=is_admin) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.stubs.Set(registry, 'update_image_metadata', orig_update_image_metadata) return orig_update_image_metadata(*args, **kwargs) self.stubs.Set(registry, 
'update_image_metadata', mock_update_image_metadata) req = unit_test_utils.get_fake_request(path="/images/%s" % image_id, method='PUT') req.headers['Content-Type'] = 'application/octet-stream' req.body = data res = req.get_response(self.api) self.assertEqual(412, res.status_int) self.assertFalse(res.location) self.assertTrue(called['initiate_deletion']) req = unit_test_utils.get_fake_request(path="/images/%s" % image_id, method='HEAD', is_admin=True) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual('True', res.headers['x-image-meta-deleted']) self.assertEqual('deleted', res.headers['x-image-meta-status']) def test_delete_during_image_upload_by_normal_user(self): self._check_delete_during_image_upload(is_admin=False) def test_delete_during_image_upload_by_admin(self): self._check_delete_during_image_upload(is_admin=True) def test_disable_purge_props(self): """ Test the special x-glance-registry-purge-props header controls the purge property behaviour of the registry. 
:see LP Bug#901534 """ fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-property-key1': 'value1'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = b"chunk00000remainder" res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertIn('id', res_body) image_id = res_body['id'] self.assertIn('/images/%s' % image_id, res.headers['location']) # Verify the status is queued self.assertIn('status', res_body) self.assertEqual('active', res_body['status']) # Check properties are not deleted self.assertIn('properties', res_body) self.assertIn('key1', res_body['properties']) self.assertEqual('value1', res_body['properties']['key1']) # Now update the image, setting new properties without # passing the x-glance-registry-purge-props header and # verify that original properties are marked deleted. req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-property-key2'] = 'value2' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify the original property no longer in headers req = webob.Request.blank("/images/%s" % image_id) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertIn('x-image-meta-property-key2', res.headers, "Did not find required property in headers. " "Got headers: %r" % res.headers) self.assertNotIn('x-image-meta-property-key1', res.headers, "Found property in headers that was not expected. " "Got headers: %r" % res.headers) # Now update the image, setting new properties and # passing the x-glance-registry-purge-props header with # a value of "false" and verify that second property # still appears in headers. 
req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.headers['x-image-meta-property-key3'] = 'value3' req.headers['x-glance-registry-purge-props'] = 'false' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Verify the second and third property in headers req = webob.Request.blank("/images/%s" % image_id) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertIn('x-image-meta-property-key2', res.headers, "Did not find required property in headers. " "Got headers: %r" % res.headers) self.assertIn('x-image-meta-property-key3', res.headers, "Did not find required property in headers. " "Got headers: %r" % res.headers) def test_publicize_image_unauthorized(self): """Create a non-public image then fail to make public""" rules = {"add_image": '@', "publicize_image": '!'} self.set_policy_rules(rules) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-is-public': 'false', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] req = webob.Request.blank("/images/%s" % res_body['id']) req.method = 'PUT' req.headers['x-image-meta-is-public'] = 'true' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_update_image_size_header_too_big(self): """Tests raises BadRequest for supplied image size that is too big""" fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'PUT' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_image_size_data_too_big(self): self.config(image_size_cap=512) 
fixture_headers = {'content-type': 'application/octet-stream'} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'PUT' req.body = b'X' * (CONF.image_size_cap + 1) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_update_image_size_chunked_data_too_big(self): self.config(image_size_cap=512) # Create new image that has no data req = webob.Request.blank("/images") req.method = 'POST' req.headers['x-image-meta-name'] = 'something' req.headers['x-image-meta-container_format'] = 'ami' req.headers['x-image-meta-disk_format'] = 'ami' res = req.get_response(self.api) image_id = jsonutils.loads(res.body)['image']['id'] fixture_headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked', } req = webob.Request.blank("/images/%s" % image_id) req.method = 'PUT' req.body_file = six.StringIO('X' * (CONF.image_size_cap + 1)) for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(413, res.status_int) def test_update_non_existing_image(self): self.config(image_size_cap=100) req = webob.Request.blank("images/%s" % _gen_uuid) req.method = 'PUT' req.body = b'test' req.headers['x-image-meta-name'] = 'test' req.headers['x-image-meta-container_format'] = 'ami' req.headers['x-image-meta-disk_format'] = 'ami' req.headers['x-image-meta-is_public'] = 'False' res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_update_public_image(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-is-public': 'true', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] req = 
webob.Request.blank("/images/%s" % res_body['id']) req.method = 'PUT' req.headers['x-image-meta-name'] = 'updated public image' res = req.get_response(self.api) self.assertEqual(200, res.status_int) def test_add_image_wrong_content_type(self): fixture_headers = { 'x-image-meta-name': 'fake image #3', 'x-image-meta-container_format': 'ami', 'x-image-meta-disk_format': 'ami', 'transfer-encoding': 'chunked', 'content-type': 'application/octet-st', } req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_get_index_sort_name_asc(self): """ Tests that the /images API returns list of public images sorted alphabetically by name in ascending order. """ UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'asdf', 'size': 19, 'checksum': None} db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'xyz', 'size': 20, 'checksum': None} db_api.image_create(self.context, extra_fixture) req = webob.Request.blank('/images?sort_key=name&sort_dir=asc') res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(3, len(images)) self.assertEqual(UUID3, images[0]['id']) self.assertEqual(UUID2, images[1]['id']) self.assertEqual(UUID4, images[2]['id']) def test_get_details_filter_changes_since(self): """ Tests that the /images/detail API returns list of images that changed since the time defined by changes-since """ dt1 = timeutils.utcnow() - datetime.timedelta(1) iso1 = timeutils.isotime(dt1) date_only1 = dt1.strftime('%Y-%m-%d') date_only2 = dt1.strftime('%Y%m%d') date_only3 = dt1.strftime('%Y-%m%d') dt2 = timeutils.utcnow() 
+ datetime.timedelta(1) iso2 = timeutils.isotime(dt2) image_ts = timeutils.utcnow() + datetime.timedelta(2) hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00') hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00') dt4 = timeutils.utcnow() + datetime.timedelta(3) iso4 = timeutils.isotime(dt4) UUID3 = _gen_uuid() extra_fixture = {'id': UUID3, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'fake image #3', 'size': 18, 'checksum': None} db_api.image_create(self.context, extra_fixture) db_api.image_destroy(self.context, UUID3) UUID4 = _gen_uuid() extra_fixture = {'id': UUID4, 'status': 'active', 'is_public': True, 'disk_format': 'ami', 'container_format': 'ami', 'name': 'fake image #4', 'size': 20, 'checksum': None, 'created_at': image_ts, 'updated_at': image_ts} db_api.image_create(self.context, extra_fixture) # Check a standard list, 4 images in db (2 deleted) req = webob.Request.blank('/images/detail') res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) self.assertEqual(UUID4, images[0]['id']) self.assertEqual(UUID2, images[1]['id']) # Expect 3 images (1 deleted) req = webob.Request.blank('/images/detail?changes-since=%s' % iso1) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(3, len(images)) self.assertEqual(UUID4, images[0]['id']) self.assertEqual(UUID3, images[1]['id']) # deleted self.assertEqual(UUID2, images[2]['id']) # Expect 1 images (0 deleted) req = webob.Request.blank('/images/detail?changes-since=%s' % iso2) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) self.assertEqual(UUID4, images[0]['id']) # Expect 1 images (0 deleted) req = 
webob.Request.blank('/images/detail?changes-since=%s' % hour_before) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) self.assertEqual(UUID4, images[0]['id']) # Expect 0 images (0 deleted) req = webob.Request.blank('/images/detail?changes-since=%s' % hour_after) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(0, len(images)) # Expect 0 images (0 deleted) req = webob.Request.blank('/images/detail?changes-since=%s' % iso4) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(0, len(images)) for param in [date_only1, date_only2, date_only3]: # Expect 3 images (1 deleted) req = webob.Request.blank('/images/detail?changes-since=%s' % param) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(3, len(images)) self.assertEqual(UUID4, images[0]['id']) self.assertEqual(UUID3, images[1]['id']) # deleted self.assertEqual(UUID2, images[2]['id']) # Bad request (empty changes-since param) req = webob.Request.blank('/images/detail?changes-since=') res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_get_images_bad_urls(self): """Check that routes collections are not on (LP bug 1185828)""" req = webob.Request.blank('/images/detail.xxx') res = req.get_response(self.api) self.assertEqual(404, res.status_int) req = webob.Request.blank('/images.xxx') res = req.get_response(self.api) self.assertEqual(404, res.status_int) req = webob.Request.blank('/images/new') res = req.get_response(self.api) self.assertEqual(404, res.status_int) req = webob.Request.blank("/images/%s/members" % UUID1) res = req.get_response(self.api) self.assertEqual(200, 
res.status_int) req = webob.Request.blank("/images/%s/members.xxx" % UUID1) res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_get_index_filter_on_user_defined_properties(self): """Check that image filtering works on user-defined properties""" image1_id = _gen_uuid() properties = {'distro': 'ubuntu', 'arch': 'i386'} extra_fixture = {'id': image1_id, 'status': 'active', 'is_public': True, 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'image-extra-1', 'size': 18, 'properties': properties, 'checksum': None} db_api.image_create(self.context, extra_fixture) image2_id = _gen_uuid() properties = {'distro': 'ubuntu', 'arch': 'x86_64', 'foo': 'bar'} extra_fixture = {'id': image2_id, 'status': 'active', 'is_public': True, 'disk_format': 'ami', 'container_format': 'ami', 'name': 'image-extra-2', 'size': 20, 'properties': properties, 'checksum': None} db_api.image_create(self.context, extra_fixture) # Test index with filter containing one user-defined property. # Filter is 'property-distro=ubuntu'. # Verify both image1 and image2 are returned req = webob.Request.blank('/images?property-distro=ubuntu') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image1_id, images[1]['id']) # Test index with filter containing one user-defined property but # non-existent value. Filter is 'property-distro=fedora'. # Verify neither images are returned req = webob.Request.blank('/images?property-distro=fedora') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing one user-defined property but # unique value. Filter is 'property-arch=i386'. # Verify only image1 is returned. 
req = webob.Request.blank('/images?property-arch=i386') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image1_id, images[0]['id']) # Test index with filter containing one user-defined property but # unique value. Filter is 'property-arch=x86_64'. # Verify only image1 is returned. req = webob.Request.blank('/images?property-arch=x86_64') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing unique user-defined property. # Filter is 'property-foo=bar'. # Verify only image2 is returned. req = webob.Request.blank('/images?property-foo=bar') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing unique user-defined property but # .value is non-existent. Filter is 'property-foo=baz'. # Verify neither images are returned. req = webob.Request.blank('/images?property-foo=baz') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties # Filter is 'property-arch=x86_64&property-distro=ubuntu'. # Verify only image2 is returned. req = webob.Request.blank('/images?property-arch=x86_64&' 'property-distro=ubuntu') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing multiple user-defined properties # Filter is 'property-arch=i386&property-distro=ubuntu'. # Verify only image1 is returned. 
req = webob.Request.blank('/images?property-arch=i386&' 'property-distro=ubuntu') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image1_id, images[0]['id']) # Test index with filter containing multiple user-defined properties. # Filter is 'property-arch=random&property-distro=ubuntu'. # Verify neither images are returned. req = webob.Request.blank('/images?property-arch=random&' 'property-distro=ubuntu') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-arch=random&property-distro=random'. # Verify neither images are returned. req = webob.Request.blank('/images?property-arch=random&' 'property-distro=random') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-boo=far&property-poo=far'. # Verify neither images are returned. req = webob.Request.blank('/images?property-boo=far&' 'property-poo=far') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-foo=bar&property-poo=far'. # Verify neither images are returned. 
req = webob.Request.blank('/images?property-foo=bar&' 'property-poo=far') res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) def test_get_images_detailed_unauthorized(self): rules = {"get_images": '!'} self.set_policy_rules(rules) req = webob.Request.blank('/images/detail') res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_get_images_unauthorized(self): rules = {"get_images": '!'} self.set_policy_rules(rules) req = webob.Request.blank('/images') res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_store_location_not_revealed(self): """ Test that the internal store location is NOT revealed through the API server """ # Check index and details... for url in ('/images', '/images/detail'): req = webob.Request.blank(url) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) images = res_dict['images'] num_locations = sum([1 for record in images if 'location' in record.keys()]) self.assertEqual(0, num_locations, images) # Check GET req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertNotIn('X-Image-Meta-Location', res.headers) # Check HEAD req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertNotIn('X-Image-Meta-Location', res.headers) # Check PUT req = webob.Request.blank("/images/%s" % UUID2) req.body = res.body req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_body = jsonutils.loads(res.body) self.assertNotIn('location', res_body['image']) # Check POST req = webob.Request.blank("/images") headers = {'x-image-meta-location': 'http://localhost', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image 
#3'} for k, v in six.iteritems(headers): req.headers[k] = v req.method = 'POST' http = store.get_store_from_scheme('http') with mock.patch.object(http, 'get_size') as size: size.return_value = 0 res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body) self.assertNotIn('location', res_body['image']) def test_image_is_checksummed(self): """Test that the image contents are checksummed properly""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} image_contents = b"chunk00000remainder" image_checksum = hashlib.md5(image_contents).hexdigest() req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = image_contents res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual(image_checksum, res_body['checksum'], "Mismatched checksum. Expected %s, got %s" % (image_checksum, res_body['checksum'])) def test_etag_equals_checksum_header(self): """Test that the ETag header matches the x-image-meta-checksum""" fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} image_contents = b"chunk00000remainder" image_checksum = hashlib.md5(image_contents).hexdigest() req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = image_contents res = req.get_response(self.api) self.assertEqual(201, res.status_int) image = jsonutils.loads(res.body)['image'] # HEAD the image and check the ETag equals the checksum header... 
expected_headers = {'x-image-meta-checksum': image_checksum, 'etag': image_checksum} req = webob.Request.blank("/images/%s" % image['id']) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) for key in expected_headers.keys(): self.assertIn(key, res.headers, "required header '%s' missing from " "returned headers" % key) for key, value in six.iteritems(expected_headers): self.assertEqual(value, res.headers[key]) def test_bad_checksum_prevents_image_creation(self): """Test that the image contents are checksummed properly""" image_contents = b"chunk00000remainder" bad_checksum = hashlib.md5(b"invalid").hexdigest() fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3', 'x-image-meta-checksum': bad_checksum, 'x-image-meta-is-public': 'true'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v req.headers['Content-Type'] = 'application/octet-stream' req.body = image_contents res = req.get_response(self.api) self.assertEqual(400, res.status_int) # Test that only one image was returned (that already exists) req = webob.Request.blank("/images") req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) def test_image_meta(self): """Test for HEAD /images/""" expected_headers = {'x-image-meta-id': UUID2, 'x-image-meta-name': 'fake image #2'} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertFalse(res.location) for key, value in six.iteritems(expected_headers): self.assertEqual(value, res.headers[key]) def test_image_meta_unauthorized(self): rules = {"get_image": '!'} self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) req.method = 
'HEAD' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_show_image_basic(self): req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertFalse(res.location) self.assertEqual('application/octet-stream', res.content_type) self.assertEqual(b'chunk00000remainder', res.body) def test_show_non_exists_image(self): req = webob.Request.blank("/images/%s" % _gen_uuid()) res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_show_image_unauthorized(self): rules = {"get_image": '!'} self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_show_image_unauthorized_download(self): rules = {"download_image": '!'} self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_show_image_restricted_download_for_core_property(self): rules = { "restricted": "not ('1024M':%(min_ram)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) req.headers['X-Auth-Token'] = 'user:tenant:_member_' req.headers['min_ram'] = '1024M' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_show_image_restricted_download_for_custom_property(self): rules = { "restricted": "not ('test_1234'==%(x_test_key)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) req.headers['X-Auth-Token'] = 'user:tenant:_member_' req.headers['x_test_key'] = 'test_1234' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_download_service_unavailable(self): """Test image download returns HTTPServiceUnavailable.""" image_fixture = self.FIXTURES[1] 
image_fixture.update({'location': 'http://netloc/path/to/file.tar.gz'}) request = webob.Request.blank("/images/%s" % UUID2) request.context = self.context image_controller = glance.api.v1.images.Controller() with mock.patch.object(image_controller, 'get_active_image_meta_or_error' ) as mocked_get_image: mocked_get_image.return_value = image_fixture self.assertRaises(webob.exc.HTTPServiceUnavailable, image_controller.show, request, mocked_get_image) @mock.patch('glance_store._drivers.filesystem.Store.get') def test_show_image_store_get_not_support(self, m_get): m_get.side_effect = store.StoreGetNotSupported() req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(400, res.status_int) @mock.patch('glance_store._drivers.filesystem.Store.get') def test_show_image_store_random_get_not_support(self, m_get): m_get.side_effect = store.StoreRandomGetNotSupported(chunk_size=0, offset=0) req = webob.Request.blank("/images/%s" % UUID2) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_delete_image(self): req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertFalse(res.location) self.assertEqual(b'', res.body) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(404, res.status_int, res.body) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual('True', res.headers['x-image-meta-deleted']) self.assertEqual('deleted', res.headers['x-image-meta-status']) def test_delete_non_exists_image(self): req = webob.Request.blank("/images/%s" % _gen_uuid()) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_delete_not_allowed(self): # Verify we can get the image data req = webob.Request.blank("/images/%s" % 
UUID2) req.method = 'GET' req.headers['X-Auth-Token'] = 'user:tenant:' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(19, len(res.body)) # Verify we cannot delete the image req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(403, res.status_int) # Verify the image data is still there req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(19, len(res.body)) def test_delete_queued_image(self): """Delete an image in a queued state Bug #747799 demonstrated that trying to DELETE an image that had had its save process killed manually results in failure because the location attribute is None. Bug #1048851 demonstrated that the status was not properly being updated to 'deleted' from 'queued'. """ fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) # Now try to delete the image... req = webob.Request.blank("/images/%s" % res_body['id']) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % res_body['id']) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual('True', res.headers['x-image-meta-deleted']) self.assertEqual('deleted', res.headers['x-image-meta-status']) def test_delete_queued_image_delayed_delete(self): """Delete an image in a queued state when delayed_delete is on Bug #1048851 demonstrated that the status was not properly being updated to 'deleted' from 'queued'. 
""" self.config(delayed_delete=True) fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-name': 'fake image #3'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) # Now try to delete the image... req = webob.Request.blank("/images/%s" % res_body['id']) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % res_body['id']) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual('True', res.headers['x-image-meta-deleted']) self.assertEqual('deleted', res.headers['x-image-meta-status']) def test_delete_protected_image(self): fixture_headers = {'x-image-meta-store': 'file', 'x-image-meta-name': 'fake image #3', 'x-image-meta-disk-format': 'vhd', 'x-image-meta-container-format': 'ovf', 'x-image-meta-protected': 'True'} req = webob.Request.blank("/images") req.method = 'POST' for k, v in six.iteritems(fixture_headers): req.headers[k] = v res = req.get_response(self.api) self.assertEqual(201, res.status_int) res_body = jsonutils.loads(res.body)['image'] self.assertEqual('queued', res_body['status']) # Now try to delete the image... 
req = webob.Request.blank("/images/%s" % res_body['id']) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_delete_image_unauthorized(self): rules = {"delete_image": '!'} self.set_policy_rules(rules) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(403, res.status_int) def test_head_details(self): req = webob.Request.blank('/images/detail') req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(405, res.status_int) self.assertEqual('GET', res.headers.get('Allow')) self.assertEqual(('GET',), res.allow) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) def test_get_details_invalid_marker(self): """ Tests that the /images/detail API returns a 400 when an invalid marker is provided """ req = webob.Request.blank('/images/detail?marker=%s' % _gen_uuid()) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_get_image_members(self): """ Tests members listing for existing images """ req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body) num_members = len(memb_list['members']) self.assertEqual(0, num_members) def test_get_image_members_allowed_by_policy(self): rules = {"get_members": '@'} self.set_policy_rules(rules) req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body) num_members = len(memb_list['members']) self.assertEqual(0, num_members) def test_get_image_members_forbidden_by_policy(self): rules = {"get_members": '!'} self.set_policy_rules(rules) req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) 
def test_get_image_members_not_existing(self): """ Tests proper exception is raised if attempt to get members of non-existing image """ req = webob.Request.blank('/images/%s/members' % _gen_uuid()) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_add_member_positive(self): """ Tests adding image members """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(204, res.status_int) def test_get_member_images(self): """ Tests image listing for members """ req = webob.Request.blank('/shared-images/pattieblack') req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body) num_members = len(memb_list['shared_images']) self.assertEqual(0, num_members) def test_replace_members(self): """ Tests replacing image members raises right exception """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=False) fixture = dict(member_id='pattieblack') req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) res = req.get_response(self.api) self.assertEqual(401, res.status_int) def test_active_image_immutable_props_for_user(self): """ Tests user cannot update immutable props of active image """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=False) fixture_header_list = [{'x-image-meta-checksum': '1234'}, {'x-image-meta-size': '12345'}] for fixture_header in fixture_header_list: req = webob.Request.blank('/images/%s' % UUID2) req.method = 'PUT' for k, v in six.iteritems(fixture_header): req = webob.Request.blank('/images/%s' % UUID2) 
req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) orig_value = res.headers[k] req = webob.Request.blank('/images/%s' % UUID2) req.headers[k] = v req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(403, res.status_int) prop = k[len('x-image-meta-'):] body = res.body.decode('utf-8') self.assertNotEqual(-1, body.find( "Forbidden to modify '%s' of active image" % prop)) req = webob.Request.blank('/images/%s' % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(orig_value, res.headers[k]) def test_deactivated_image_immutable_props_for_user(self): """ Tests user cannot update immutable props of deactivated image """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=False) fixture_header_list = [{'x-image-meta-checksum': '1234'}, {'x-image-meta-size': '12345'}] for fixture_header in fixture_header_list: req = webob.Request.blank('/images/%s' % UUID3) req.method = 'PUT' for k, v in six.iteritems(fixture_header): req = webob.Request.blank('/images/%s' % UUID3) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) orig_value = res.headers[k] req = webob.Request.blank('/images/%s' % UUID3) req.headers[k] = v req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(403, res.status_int) prop = k[len('x-image-meta-'):] body = res.body.decode('utf-8') self.assertNotEqual(-1, body.find( "Forbidden to modify '%s' of deactivated image" % prop)) req = webob.Request.blank('/images/%s' % UUID3) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(orig_value, res.headers[k]) def test_props_of_active_image_mutable_for_admin(self): """ Tests admin can update 'immutable' props of active image """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, 
is_admin=True) fixture_header_list = [{'x-image-meta-checksum': '1234'}, {'x-image-meta-size': '12345'}] for fixture_header in fixture_header_list: req = webob.Request.blank('/images/%s' % UUID2) req.method = 'PUT' for k, v in six.iteritems(fixture_header): req = webob.Request.blank('/images/%s' % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % UUID2) req.headers[k] = v req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % UUID2) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(v, res.headers[k]) def test_props_of_deactivated_image_mutable_for_admin(self): """ Tests admin can update 'immutable' props of deactivated image """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) fixture_header_list = [{'x-image-meta-checksum': '1234'}, {'x-image-meta-size': '12345'}] for fixture_header in fixture_header_list: req = webob.Request.blank('/images/%s' % UUID3) req.method = 'PUT' for k, v in six.iteritems(fixture_header): req = webob.Request.blank('/images/%s' % UUID3) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % UUID3) req.headers[k] = v req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s' % UUID3) req.method = 'HEAD' res = req.get_response(self.api) self.assertEqual(200, res.status_int) self.assertEqual(v, res.headers[k]) def test_replace_members_non_existing_image(self): """ Tests replacing image members raises right exception """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) fixture = dict(member_id='pattieblack') req = 
webob.Request.blank('/images/%s/members' % _gen_uuid()) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_replace_members_bad_request(self): """ Tests replacing image members raises bad request if body is wrong """ test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) fixture = dict(member_id='pattieblack') req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) res = req.get_response(self.api) self.assertEqual(400, res.status_int) def test_replace_members_positive(self): """ Tests replacing image members """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router, is_admin=True) fixture = [dict(member_id='pattieblack', can_share=False)] # Replace req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) def test_replace_members_forbidden_by_policy(self): rules = {"modify_member": '!'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] req = webob.Request.blank('/images/%s/members' % UUID1) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) def test_replace_members_allowed_by_policy(self): rules = {"modify_member": '@'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) 
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] req = webob.Request.blank('/images/%s/members' % UUID1) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_add_member_unauthorized(self): """ Tests adding image members raises right exception """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router, is_admin=False) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(401, res.status_int) def test_add_member_non_existing_image(self): """ Tests adding image members raises right exception """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router, is_admin=True) test_uri = '/images/%s/members/pattieblack' req = webob.Request.blank(test_uri % _gen_uuid()) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_add_member_with_body(self): """ Tests adding image members """ fixture = dict(can_share=True) test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router, is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' req.body = jsonutils.dump_as_bytes(dict(member=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) def test_add_member_overlimit(self): self.config(image_member_quota=0) test_router_api = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(413, res.status_int) def test_add_member_unlimited(self): self.config(image_member_quota=-1) test_router_api = router.API(self.mapper) self.api = 
test_utils.FakeAuthMiddleware( test_router_api, is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(204, res.status_int) def test_add_member_forbidden_by_policy(self): rules = {"modify_member": '!'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) def test_add_member_allowed_by_policy(self): rules = {"modify_member": '@'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_get_members_of_deleted_image_raises_404(self): """ Tests members listing for deleted image raises 404. """ req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) self.assertIn('Image with identifier %s has been deleted.' % UUID2, res.body.decode()) def test_delete_member_of_deleted_image_raises_404(self): """ Tests deleting members of deleted image raises 404. 
""" test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) self.assertIn('Image with identifier %s has been deleted.' % UUID2, res.body.decode()) def test_update_members_of_deleted_image_raises_404(self): """ Tests update members of deleted image raises 404. """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(204, res.status_int) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) body = res.body.decode('utf-8') self.assertIn( 'Image with identifier %s has been deleted.' 
% UUID2, body) def test_replace_members_of_image(self): test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body) self.assertEqual(1, len(memb_list)) def test_replace_members_of_image_overlimit(self): # Set image_member_quota to 1 self.config(image_member_quota=1) test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) # PUT an original member entry fixture = [{'member_id': 'baz', 'can_share': False}] req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # GET original image member list req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) original_members = jsonutils.loads(res.body)['members'] self.assertEqual(1, len(original_members)) # PUT 2 image members to replace existing (overlimit) fixture = [{'member_id': 'foo1', 'can_share': False}, {'member_id': 'foo2', 'can_share': False}] req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(413, res.status_int) # GET member list req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) # Assert the member list was 
not changed memb_list = jsonutils.loads(res.body)['members'] self.assertEqual(original_members, memb_list) def test_replace_members_of_image_unlimited(self): self.config(image_member_quota=-1) test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) fixture = [{'member_id': 'foo1', 'can_share': False}, {'member_id': 'foo2', 'can_share': False}] req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'PUT' req.body = jsonutils.dump_as_bytes(dict(memberships=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) req = webob.Request.blank('/images/%s/members' % UUID2) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(200, res.status_int) memb_list = jsonutils.loads(res.body)['members'] self.assertEqual(fixture, memb_list) def test_create_member_to_deleted_image_raises_404(self): """ Tests adding members to deleted image raises 404. """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) req = webob.Request.blank("/images/%s" % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(200, res.status_int) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) self.assertIn('Image with identifier %s has been deleted.' 
% UUID2, res.body.decode()) def test_delete_member(self): """ Tests deleting image members raises right exception """ test_router = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_router, is_admin=False) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(401, res.status_int) def test_delete_member_on_non_existing_image(self): """ Tests deleting image members raises right exception """ test_router = router.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_router, is_admin=True) test_uri = '/images/%s/members/pattieblack' req = webob.Request.blank(test_uri % _gen_uuid()) req.method = 'DELETE' res = req.get_response(api) self.assertEqual(404, res.status_int) def test_delete_non_exist_member(self): """ Test deleting image members raises right exception """ test_router = router.API(self.mapper) api = test_utils.FakeAuthMiddleware( test_router, is_admin=True) req = webob.Request.blank('/images/%s/members/test_user' % UUID2) req.method = 'DELETE' res = req.get_response(api) self.assertEqual(404, res.status_int) def test_delete_image_member(self): test_rserver = router.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_rserver, is_admin=True) # Add member to image: fixture = dict(can_share=True) test_uri = '/images/%s/members/test_add_member_positive' req = webob.Request.blank(test_uri % UUID2) req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(member=fixture)) res = req.get_response(self.api) self.assertEqual(204, res.status_int) # Delete member test_uri = '/images/%s/members/test_add_member_positive' req = webob.Request.blank(test_uri % UUID2) req.headers['X-Auth-Token'] = 'test1:test1:' req.method = 'DELETE' req.content_type = 'application/json' res = req.get_response(self.api) self.assertEqual(404, res.status_int) self.assertIn(b'Forbidden', res.body) def 
test_delete_member_allowed_by_policy(self): rules = {"delete_member": '@', "modify_member": '@'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_delete_member_forbidden_by_policy(self): rules = {"delete_member": '!', "modify_member": '@'} self.set_policy_rules(rules) self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper), is_admin=True) req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2) req.method = 'PUT' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) req.method = 'DELETE' res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) class TestImageSerializer(base.IsolatedUnitTest): def setUp(self): """Establish a clean test environment""" super(TestImageSerializer, self).setUp() self.receiving_user = 'fake_user' self.receiving_tenant = 2 self.context = glance.context.RequestContext( is_admin=True, user=self.receiving_user, tenant=self.receiving_tenant) self.serializer = glance.api.v1.images.ImageSerializer() def image_iter(): for x in [b'chunk', b'678911234', b'56789']: yield x self.FIXTURE = { 'image_iterator': image_iter(), 'image_meta': { 'id': UUID2, 'name': 'fake image #2', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'deleted_at': None, 'deleted': False, 'checksum': '06ff575a2856444fbe93100157ed74ab92eb7eff', 'size': 19, 'owner': _gen_uuid(), 'location': "file:///tmp/glance-tests/2", 'properties': {}, } } def test_meta(self): exp_headers = {'x-image-meta-id': UUID2, 
'x-image-meta-location': 'file:///tmp/glance-tests/2', 'ETag': self.FIXTURE['image_meta']['checksum'], 'x-image-meta-name': 'fake image #2'} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' req.remote_addr = "1.2.3.4" req.context = self.context response = webob.Response(request=req) self.serializer.meta(response, self.FIXTURE) for key, value in six.iteritems(exp_headers): self.assertEqual(value, response.headers[key]) def test_meta_utf8(self): # We get unicode strings from JSON, and therefore all strings in the # metadata will actually be unicode when handled internally. But we # want to output utf-8. FIXTURE = { 'image_meta': { 'id': six.text_type(UUID2), 'name': u'fake image #2 with utf-8 éàè', 'status': u'active', 'disk_format': u'vhd', 'container_format': u'ovf', 'is_public': True, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow(), 'deleted_at': None, 'deleted': False, 'checksum': u'06ff575a2856444fbe93100157ed74ab92eb7eff', 'size': 19, 'owner': six.text_type(_gen_uuid()), 'location': u"file:///tmp/glance-tests/2", 'properties': { u'prop_éé': u'ça marche', u'prop_çé': u'çé', } } } exp_headers = {'x-image-meta-id': UUID2, 'x-image-meta-location': 'file:///tmp/glance-tests/2', 'ETag': '06ff575a2856444fbe93100157ed74ab92eb7eff', 'x-image-meta-size': '19', # str, not int 'x-image-meta-name': 'fake image #2 with utf-8 éàè', 'x-image-meta-property-prop_éé': 'ça marche', 'x-image-meta-property-prop_çé': 'çé'} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'HEAD' req.remote_addr = "1.2.3.4" req.context = self.context response = webob.Response(request=req) self.serializer.meta(response, FIXTURE) if six.PY2: self.assertNotEqual(type(FIXTURE['image_meta']['name']), type(response.headers['x-image-meta-name'])) if six.PY3: self.assertEqual(FIXTURE['image_meta']['name'], response.headers['x-image-meta-name']) else: self.assertEqual( FIXTURE['image_meta']['name'], response.headers['x-image-meta-name'].decode('utf-8')) for key, 
value in six.iteritems(exp_headers): self.assertEqual(value, response.headers[key]) if six.PY2: FIXTURE['image_meta']['properties'][u'prop_bad'] = 'çé' self.assertRaises(UnicodeDecodeError, self.serializer.meta, response, FIXTURE) def test_show(self): exp_headers = {'x-image-meta-id': UUID2, 'x-image-meta-location': 'file:///tmp/glance-tests/2', 'ETag': self.FIXTURE['image_meta']['checksum'], 'x-image-meta-name': 'fake image #2'} req = webob.Request.blank("/images/%s" % UUID2) req.method = 'GET' req.context = self.context response = webob.Response(request=req) self.serializer.show(response, self.FIXTURE) for key, value in six.iteritems(exp_headers): self.assertEqual(value, response.headers[key]) self.assertEqual(b'chunk67891123456789', response.body) def test_show_notify(self): """Make sure an eventlet posthook for notify_image_sent is added.""" req = webob.Request.blank("/images/%s" % UUID2) req.method = 'GET' req.context = self.context response = webob.Response(request=req) response.request.environ['eventlet.posthooks'] = [] self.serializer.show(response, self.FIXTURE) # just make sure the app_iter is called for chunk in response.app_iter: pass self.assertNotEqual([], response.request.environ['eventlet.posthooks']) def test_image_send_notification(self): req = webob.Request.blank("/images/%s" % UUID2) req.method = 'GET' req.remote_addr = '1.2.3.4' req.context = self.context image_meta = self.FIXTURE['image_meta'] called = {"notified": False} expected_payload = { 'bytes_sent': 19, 'image_id': UUID2, 'owner_id': image_meta['owner'], 'receiver_tenant_id': self.receiving_tenant, 'receiver_user_id': self.receiving_user, 'destination_ip': '1.2.3.4', } def fake_info(_event_type, _payload): self.assertEqual(expected_payload, _payload) called['notified'] = True self.stubs.Set(self.serializer.notifier, 'info', fake_info) glance.api.common.image_send_notification(19, 19, image_meta, req, self.serializer.notifier) self.assertTrue(called['notified']) def 
test_image_send_notification_error(self): """Ensure image.send notification is sent on error.""" req = webob.Request.blank("/images/%s" % UUID2) req.method = 'GET' req.remote_addr = '1.2.3.4' req.context = self.context image_meta = self.FIXTURE['image_meta'] called = {"notified": False} expected_payload = { 'bytes_sent': 17, 'image_id': UUID2, 'owner_id': image_meta['owner'], 'receiver_tenant_id': self.receiving_tenant, 'receiver_user_id': self.receiving_user, 'destination_ip': '1.2.3.4', } def fake_error(_event_type, _payload): self.assertEqual(expected_payload, _payload) called['notified'] = True self.stubs.Set(self.serializer.notifier, 'error', fake_error) # expected and actually sent bytes differ glance.api.common.image_send_notification(17, 19, image_meta, req, self.serializer.notifier) self.assertTrue(called['notified']) def test_redact_location(self): """Ensure location redaction does not change original metadata""" image_meta = {'size': 3, 'id': '123', 'location': 'http://localhost'} redacted_image_meta = {'size': 3, 'id': '123'} copy_image_meta = copy.deepcopy(image_meta) tmp_image_meta = glance.api.v1.images.redact_loc(image_meta) self.assertEqual(image_meta, copy_image_meta) self.assertEqual(redacted_image_meta, tmp_image_meta) def test_noop_redact_location(self): """Check no-op location redaction does not change original metadata""" image_meta = {'size': 3, 'id': '123'} redacted_image_meta = {'size': 3, 'id': '123'} copy_image_meta = copy.deepcopy(image_meta) tmp_image_meta = glance.api.v1.images.redact_loc(image_meta) self.assertEqual(image_meta, copy_image_meta) self.assertEqual(redacted_image_meta, tmp_image_meta) self.assertEqual(redacted_image_meta, image_meta) class TestFilterValidator(base.IsolatedUnitTest): def test_filter_validator(self): self.assertFalse(glance.api.v1.filters.validate('size_max', -1)) self.assertTrue(glance.api.v1.filters.validate('size_max', 1)) self.assertTrue(glance.api.v1.filters.validate('protected', 'True')) 
self.assertTrue(glance.api.v1.filters.validate('protected', 'FALSE')) self.assertFalse(glance.api.v1.filters.validate('protected', '-1')) class TestAPIProtectedProps(base.IsolatedUnitTest): def setUp(self): """Establish a clean test environment""" super(TestAPIProtectedProps, self).setUp() self.mapper = routes.Mapper() # turn on property protections self.set_property_protections() self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper)) db_api.get_engine() db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def tearDown(self): """Clear the test environment""" super(TestAPIProtectedProps, self).tearDown() self.destroy_fixtures() def destroy_fixtures(self): # Easiest to just drop the models and re-create them... db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def _create_admin_image(self, props=None): if props is None: props = {} request = unit_test_utils.get_fake_request(path='/images') headers = {'x-image-meta-disk-format': 'ami', 'x-image-meta-container-format': 'ami', 'x-image-meta-name': 'foo', 'x-image-meta-size': '0', 'x-auth-token': 'user:tenant:admin'} headers.update(props) for k, v in six.iteritems(headers): request.headers[k] = v created_image = request.get_response(self.api) res_body = jsonutils.loads(created_image.body)['image'] image_id = res_body['id'] return image_id def test_prop_protection_with_create_and_permitted_role(self): """ As admin role, create an image and verify permitted role 'member' can create a protected property """ image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_owner_foo': 'bar'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('bar', 
res_body['properties']['x_owner_foo']) def test_prop_protection_with_permitted_policy_config(self): """ As admin role, create an image and verify permitted role 'member' can create a protected property """ self.set_property_protections(use_policies=True) image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:admin', 'x-image-meta-property-spl_create_prop_policy': 'bar'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('bar', res_body['properties']['spl_create_prop_policy']) def test_prop_protection_with_create_and_unpermitted_role(self): """ As admin role, create an image and verify unpermitted role 'fake_member' can *not* create a protected property """ image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:fake_member', 'x-image-meta-property-x_owner_foo': 'bar'} for k, v in six.iteritems(headers): another_request.headers[k] = v another_request.get_response(self.api) output = another_request.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) self.assertIn("Property '%s' is protected" % "x_owner_foo", output.body.decode()) def test_prop_protection_with_show_and_permitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'member' can read that protected property via HEAD """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v res2 = 
another_request.get_response(self.api) self.assertEqual('bar', res2.headers['x-image-meta-property-x_owner_foo']) def test_prop_protection_with_show_and_unpermitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'fake_role' can *not* read that protected property via HEAD """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:fake_role'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) def test_prop_protection_with_get_and_permitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'member' can read that protected property via GET """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v res2 = another_request.get_response(self.api) self.assertEqual('bar', res2.headers['x-image-meta-property-x_owner_foo']) def test_prop_protection_with_get_and_unpermitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'fake_role' can *not* read that protected property via GET """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:fake_role'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) 
self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) def test_prop_protection_with_detail_and_permitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'member' can read that protected property via /images/detail """ self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/detail') headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['images'][0] self.assertEqual('bar', res_body['properties']['x_owner_foo']) def test_prop_protection_with_detail_and_permitted_policy(self): """ As admin role, create an image with a protected property, and verify permitted role 'member' can read that protected property via /images/detail """ self.set_property_protections(use_policies=True) self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/detail') headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['images'][0] self.assertEqual('bar', res_body['properties']['x_owner_foo']) def test_prop_protection_with_detail_and_unpermitted_role(self): """ As admin role, create an image with a protected property, and verify permitted role 'fake_role' can *not* read that protected property via /images/detail """ self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/detail') headers = 
{'x-auth-token': 'user:tenant:fake_role'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['images'][0] self.assertNotIn('x-image-meta-property-x_owner_foo', res_body['properties']) def test_prop_protection_with_detail_and_unpermitted_policy(self): """ As admin role, create an image with a protected property, and verify permitted role 'fake_role' can *not* read that protected property via /images/detail """ self.set_property_protections(use_policies=True) self._create_admin_image({'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( method='GET', path='/images/detail') headers = {'x-auth-token': 'user:tenant:fake_role'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['images'][0] self.assertNotIn('x-image-meta-property-x_owner_foo', res_body['properties']) def test_prop_protection_with_update_and_permitted_role(self): """ As admin role, create an image with protected property, and verify permitted role 'member' can update that protected property """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_owner_foo': 'baz'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('baz', res_body['properties']['x_owner_foo']) def test_prop_protection_with_update_and_permitted_policy(self): """ As admin role, create an image with protected property, and verify permitted role 'admin' can update that protected property """ 
self.set_property_protections(use_policies=True) image_id = self._create_admin_image( {'x-image-meta-property-spl_default_policy': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:admin', 'x-image-meta-property-spl_default_policy': 'baz'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('baz', res_body['properties']['spl_default_policy']) def test_prop_protection_with_update_and_unpermitted_role(self): """ As admin role, create an image with protected property, and verify unpermitted role 'fake_role' can *not* update that protected property """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:fake_role', 'x-image-meta-property-x_owner_foo': 'baz'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) self.assertIn("Property '%s' is protected" % "x_owner_foo", output.body.decode()) def test_prop_protection_with_update_and_unpermitted_policy(self): """ As admin role, create an image with protected property, and verify unpermitted role 'fake_role' can *not* update that protected property """ self.set_property_protections(use_policies=True) image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:fake_role', 'x-image-meta-property-x_owner_foo': 'baz'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) 
self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) self.assertIn("Property '%s' is protected" % "x_owner_foo", output.body.decode()) def test_prop_protection_update_without_read(self): """ Test protected property cannot be updated without read permission """ image_id = self._create_admin_image( {'x-image-meta-property-spl_update_only_prop': 'foo'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:spl_role', 'x-image-meta-property-spl_update_only_prop': 'bar'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(webob.exc.HTTPForbidden.code, output.status_int) self.assertIn("Property '%s' is protected" % "spl_update_only_prop", output.body.decode()) def test_prop_protection_update_noop(self): """ Test protected property update is allowed as long as the user has read access and the value is unchanged """ image_id = self._create_admin_image( {'x-image-meta-property-spl_read_prop': 'foo'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:spl_role', 'x-image-meta-property-spl_read_prop': 'foo'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('foo', res_body['properties']['spl_read_prop']) self.assertEqual(200, output.status_int) def test_prop_protection_with_delete_and_permitted_role(self): """ As admin role, create an image with protected property, and verify permitted role 'member' can can delete that protected property """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 
'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual({}, res_body['properties']) def test_prop_protection_with_delete_and_permitted_policy(self): """ As admin role, create an image with protected property, and verify permitted role 'member' can can delete that protected property """ self.set_property_protections(use_policies=True) image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual({}, res_body['properties']) def test_prop_protection_with_delete_and_unpermitted_read(self): """ Test protected property cannot be deleted without read permission """ image_id = self._create_admin_image( {'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:fake_role', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:admin'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertEqual('bar', 
output.headers['x-image-meta-property-x_owner_foo']) def test_prop_protection_with_delete_and_unpermitted_delete(self): """ Test protected property cannot be deleted without delete permission """ image_id = self._create_admin_image( {'x-image-meta-property-spl_update_prop': 'foo'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:spl_role', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) self.assertIn("Property '%s' is protected" % "spl_update_prop", output.body.decode()) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:admin'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertEqual( 'foo', output.headers['x-image-meta-property-spl_update_prop']) def test_read_protected_props_leak_with_update(self): """ Verify when updating props that ones we don't have read permission for are not disclosed """ image_id = self._create_admin_image( {'x-image-meta-property-spl_update_prop': '0', 'x-image-meta-property-foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:spl_role', 'x-image-meta-property-spl_update_prop': '1', 'X-Glance-Registry-Purge-Props': 'False'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('1', res_body['properties']['spl_update_prop']) self.assertNotIn('foo', res_body['properties']) def test_update_protected_props_mix_no_read(self): """ Create an image with two 
props - one only readable by admin, and one readable/updatable by member. Verify member can successfully update their property while the admin owned one is ignored transparently """ image_id = self._create_admin_image( {'x-image-meta-property-admin_foo': 'bar', 'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_owner_foo': 'baz'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('baz', res_body['properties']['x_owner_foo']) self.assertNotIn('admin_foo', res_body['properties']) def test_update_protected_props_mix_read(self): """ Create an image with two props - one readable/updatable by admin, but also readable by spl_role. The other is readable/updatable by spl_role. Verify spl_role can successfully update their property but not the admin owned one """ custom_props = { 'x-image-meta-property-spl_read_only_prop': '1', 'x-image-meta-property-spl_update_prop': '2' } image_id = self._create_admin_image(custom_props) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') # verify spl_role can update it's prop headers = {'x-auth-token': 'user:tenant:spl_role', 'x-image-meta-property-spl_read_only_prop': '1', 'x-image-meta-property-spl_update_prop': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual(200, output.status_int) self.assertEqual('1', res_body['properties']['spl_read_only_prop']) self.assertEqual('1', res_body['properties']['spl_update_prop']) # verify spl_role can not update admin controlled prop headers = {'x-auth-token': 'user:tenant:spl_role', 'x-image-meta-property-spl_read_only_prop': '2', 
'x-image-meta-property-spl_update_prop': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) def test_delete_protected_props_mix_no_read(self): """ Create an image with two props - one only readable by admin, and one readable/deletable by member. Verify member can successfully delete their property while the admin owned one is ignored transparently """ image_id = self._create_admin_image( {'x-image-meta-property-admin_foo': 'bar', 'x-image-meta-property-x_owner_foo': 'bar'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertNotIn('x_owner_foo', res_body['properties']) self.assertNotIn('admin_foo', res_body['properties']) def test_delete_protected_props_mix_read(self): """ Create an image with two props - one readable/deletable by admin, but also readable by spl_role. The other is readable/deletable by spl_role. Verify spl_role is forbidden to purge_props in this scenario without retaining the readable prop. """ custom_props = { 'x-image-meta-property-spl_read_only_prop': '1', 'x-image-meta-property-spl_delete_prop': '2' } image_id = self._create_admin_image(custom_props) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:spl_role', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) def test_create_protected_prop_check_case_insensitive(self): """ Verify that role check is case-insensitive i.e. 
the property marked with role Member is creatable by the member role """ image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_case_insensitive': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('1', res_body['properties']['x_case_insensitive']) def test_read_protected_prop_check_case_insensitive(self): """ Verify that role check is case-insensitive i.e. the property marked with role Member is readable by the member role """ custom_props = { 'x-image-meta-property-x_case_insensitive': '1' } image_id = self._create_admin_image(custom_props) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertEqual( '1', output.headers['x-image-meta-property-x_case_insensitive']) def test_update_protected_props_check_case_insensitive(self): """ Verify that role check is case-insensitive i.e. 
the property marked with role Member is updatable by the member role """ image_id = self._create_admin_image( {'x-image-meta-property-x_case_insensitive': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_case_insensitive': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('2', res_body['properties']['x_case_insensitive']) def test_delete_protected_props_check_case_insensitive(self): """ Verify that role check is case-insensitive i.e. the property marked with role Member is deletable by the member role """ image_id = self._create_admin_image( {'x-image-meta-property-x_case_insensitive': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual({}, res_body['properties']) def test_create_non_protected_prop(self): """ Verify property marked with special char '@' is creatable by an unknown role """ image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-image-meta-property-x_all_permitted': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('1', res_body['properties']['x_all_permitted']) def test_read_non_protected_prop(self): """ Verify property marked with special char '@' is readable by an unknown role """ custom_props = { 
'x-image-meta-property-x_all_permitted': '1' } image_id = self._create_admin_image(custom_props) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:joe_soap'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertEqual(b'', output.body) self.assertEqual( '1', output.headers['x-image-meta-property-x_all_permitted']) def test_update_non_protected_prop(self): """ Verify property marked with special char '@' is updatable by an unknown role """ image_id = self._create_admin_image( {'x-image-meta-property-x_all_permitted': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-image-meta-property-x_all_permitted': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('2', res_body['properties']['x_all_permitted']) def test_delete_non_protected_prop(self): """ Verify property marked with special char '@' is deletable by an unknown role """ image_id = self._create_admin_image( {'x-image-meta-property-x_all_permitted': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) res_body = jsonutils.loads(output.body)['image'] self.assertEqual({}, res_body['properties']) def test_create_locked_down_protected_prop(self): """ Verify a property protected by special char '!' 
is creatable by no one """ image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_none_permitted': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) # also check admin can not create another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:admin', 'x-image-meta-property-x_none_permitted_admin': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) def test_read_locked_down_protected_prop(self): """ Verify a property protected by special char '!' is readable by no one """ custom_props = { 'x-image-meta-property-x_none_read': '1' } image_id = self._create_admin_image(custom_props) another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:member'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertNotIn('x_none_read', output.headers) # also check admin can not read another_request = unit_test_utils.get_fake_request( method='HEAD', path='/images/%s' % image_id) headers = {'x-auth-token': 'user:tenant:admin'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) self.assertNotIn('x_none_read', output.headers) def test_update_locked_down_protected_prop(self): """ Verify a property protected by special char '!' 
is updatable by no one """ image_id = self._create_admin_image( {'x-image-meta-property-x_none_update': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'x-image-meta-property-x_none_update': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) # also check admin can't update property another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:admin', 'x-image-meta-property-x_none_update': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) def test_delete_locked_down_protected_prop(self): """ Verify a property protected by special char '!' is deletable by no one """ image_id = self._create_admin_image( {'x-image-meta-property-x_none_delete': '1'}) another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:member', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) # also check admin can't delete another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:admin', 'X-Glance-Registry-Purge-Props': 'True'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(403, output.status_int) class TestAPIPropertyQuotas(base.IsolatedUnitTest): def setUp(self): """Establish a clean test environment""" super(TestAPIPropertyQuotas, self).setUp() self.mapper = routes.Mapper() self.api = 
test_utils.FakeAuthMiddleware(router.API(self.mapper)) db_api.get_engine() db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def _create_admin_image(self, props=None): if props is None: props = {} request = unit_test_utils.get_fake_request(path='/images') headers = {'x-image-meta-disk-format': 'ami', 'x-image-meta-container-format': 'ami', 'x-image-meta-name': 'foo', 'x-image-meta-size': '0', 'x-auth-token': 'user:tenant:admin'} headers.update(props) for k, v in six.iteritems(headers): request.headers[k] = v created_image = request.get_response(self.api) res_body = jsonutils.loads(created_image.body)['image'] image_id = res_body['id'] return image_id def test_update_image_with_too_many_properties(self): """ Ensure that updating image properties enforces the quota. """ self.config(image_property_quota=1) image_id = self._create_admin_image() another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-image-meta-property-x_all_permitted': '1', 'x-image-meta-property-x_all_permitted_foo': '2'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(413, output.status_int) self.assertIn("Attempted: 2, Maximum: 1", output.text) def test_update_image_with_too_many_properties_without_purge_props(self): """ Ensure that updating image properties counts existing image propertys when enforcing property quota. 
""" self.config(image_property_quota=1) request = unit_test_utils.get_fake_request(path='/images') headers = {'x-image-meta-disk-format': 'ami', 'x-image-meta-container-format': 'ami', 'x-image-meta-name': 'foo', 'x-image-meta-size': '0', 'x-image-meta-property-x_all_permitted_create': '1', 'x-auth-token': 'user:tenant:admin'} for k, v in six.iteritems(headers): request.headers[k] = v created_image = request.get_response(self.api) res_body = jsonutils.loads(created_image.body)['image'] image_id = res_body['id'] another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-glance-registry-purge-props': 'False', 'x-image-meta-property-x_all_permitted': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(413, output.status_int) self.assertIn("Attempted: 2, Maximum: 1", output.text) def test_update_properties_without_purge_props_overwrite_value(self): """ Ensure that updating image properties does not count against image property quota. 
""" self.config(image_property_quota=2) request = unit_test_utils.get_fake_request(path='/images') headers = {'x-image-meta-disk-format': 'ami', 'x-image-meta-container-format': 'ami', 'x-image-meta-name': 'foo', 'x-image-meta-size': '0', 'x-image-meta-property-x_all_permitted_create': '1', 'x-auth-token': 'user:tenant:admin'} for k, v in six.iteritems(headers): request.headers[k] = v created_image = request.get_response(self.api) res_body = jsonutils.loads(created_image.body)['image'] image_id = res_body['id'] another_request = unit_test_utils.get_fake_request( path='/images/%s' % image_id, method='PUT') headers = {'x-auth-token': 'user:tenant:joe_soap', 'x-glance-registry-purge-props': 'False', 'x-image-meta-property-x_all_permitted_create': '3', 'x-image-meta-property-x_all_permitted': '1'} for k, v in six.iteritems(headers): another_request.headers[k] = v output = another_request.get_response(self.api) self.assertEqual(200, output.status_int) res_body = jsonutils.loads(output.body)['image'] self.assertEqual('1', res_body['properties']['x_all_permitted']) self.assertEqual('3', res_body['properties']['x_all_permitted_create']) glance-12.0.0/glance/tests/unit/v1/test_registry_api.py0000664000567000056710000025026412701407047024211 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid import mock from oslo_config import cfg from oslo_serialization import jsonutils import routes import six import webob import glance.api.common import glance.common.config from glance.common import crypt from glance.common import timeutils from glance import context from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import models as db_models from glance.registry.api import v1 as rserver from glance.tests.unit import base from glance.tests import utils as test_utils CONF = cfg.CONF _gen_uuid = lambda: str(uuid.uuid4()) UUID1 = _gen_uuid() UUID2 = _gen_uuid() class TestRegistryAPI(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): def setUp(self): """Establish a clean test environment""" super(TestRegistryAPI, self).setUp() self.mapper = routes.Mapper() self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=True) def _get_extra_fixture(id, name, **kwargs): return self.get_extra_fixture( id, name, locations=[{'url': "file:///%s/%s" % (self.test_dir, id), 'metadata': {}, 'status': 'active'}], **kwargs) self.FIXTURES = [ _get_extra_fixture(UUID1, 'fake image #1', is_public=False, disk_format='ami', container_format='ami', min_disk=0, min_ram=0, owner=123, size=13, properties={'type': 'kernel'}), _get_extra_fixture(UUID2, 'fake image #2', min_disk=5, min_ram=256, size=19, properties={})] self.context = context.RequestContext(is_admin=True) db_api.get_engine() self.destroy_fixtures() self.create_fixtures() def tearDown(self): """Clear the test environment""" super(TestRegistryAPI, self).tearDown() self.destroy_fixtures() def test_show(self): """ Tests that the /images/ registry API endpoint returns the expected image """ fixture = {'id': UUID2, 'name': 'fake image #2', 'size': 19, 'min_ram': 256, 'min_disk': 5, 'checksum': None} res = self.get_api_response_ext(200, '/images/%s' % UUID2) res_dict = jsonutils.loads(res.body) image = res_dict['image'] for k, v in six.iteritems(fixture): 
self.assertEqual(v, image[k]) def test_show_unknown(self): """ Tests that the /images/ registry API endpoint returns a 404 for an unknown image id """ self.get_api_response_ext(404, '/images/%s' % _gen_uuid()) def test_show_invalid(self): """ Tests that the /images/ registry API endpoint returns a 404 for an invalid (therefore unknown) image id """ self.get_api_response_ext(404, '/images/%s' % _gen_uuid()) def test_show_deleted_image_as_admin(self): """ Tests that the /images/ registry API endpoint returns a 200 for deleted image to admin user. """ # Delete image #2 self.get_api_response_ext(200, '/images/%s' % UUID2, method='DELETE') self.get_api_response_ext(200, '/images/%s' % UUID2) def test_show_deleted_image_as_nonadmin(self): """ Tests that the /images/ registry API endpoint returns a 404 for deleted image to non-admin user. """ # Delete image #2 self.get_api_response_ext(200, '/images/%s' % UUID2, method='DELETE') api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=False) self.get_api_response_ext(404, '/images/%s' % UUID2, api=api) def test_show_private_image_with_no_admin_user(self): UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, size=18, owner='test user', is_public=False) db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(404, '/images/%s' % UUID4, api=api) def test_get_root(self): """ Tests that the root registry API returns "index", which is a list of public images """ fixture = {'id': UUID2, 'size': 19, 'checksum': None} res = self.get_api_response_ext(200, url='/') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for k, v in six.iteritems(fixture): self.assertEqual(v, images[0][k]) def test_get_index(self): """ Tests that the /images registry API returns list of public images """ fixture = {'id': UUID2, 'size': 19, 'checksum': None} res = 
self.get_api_response_ext(200) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for k, v in six.iteritems(fixture): self.assertEqual(v, images[0][k]) def test_get_index_marker(self): """ Tests that the /images registry API returns list of public images that conforms to a marker query param """ time1 = timeutils.utcnow() + datetime.timedelta(seconds=5) time2 = timeutils.utcnow() + datetime.timedelta(seconds=4) time3 = timeutils.utcnow() UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=time1) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=time2) db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, created_at=time3) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images?marker=%s' % UUID4) self.assertEqualImages(res, (UUID5, UUID2)) def test_get_index_unknown_marker(self): """ Tests that the /images registry API returns a 400 when an unknown marker is provided """ self.get_api_response_ext(400, url='/images?marker=%s' % _gen_uuid()) def test_get_index_malformed_marker(self): """ Tests that the /images registry API returns a 400 when a malformed marker is provided """ res = self.get_api_response_ext(400, url='/images?marker=4') self.assertIn(b'marker', res.body) def test_get_index_forbidden_marker(self): """ Tests that the /images registry API returns a 400 when a forbidden marker is provided """ test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(400, url='/images?marker=%s' % UUID1, api=api) def test_get_index_limit(self): """ Tests that the /images registry API returns list of public images that conforms to a limit query param """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19) db_api.image_create(self.context, 
extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images?limit=1') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) # expect list to be sorted by created_at desc self.assertEqual(UUID4, images[0]['id']) def test_get_index_limit_negative(self): """ Tests that the /images registry API returns list of public images that conforms to a limit query param """ self.get_api_response_ext(400, url='/images?limit=-1') def test_get_index_limit_non_int(self): """ Tests that the /images registry API returns list of public images that conforms to a limit query param """ self.get_api_response_ext(400, url='/images?limit=a') def test_get_index_limit_marker(self): """ Tests that the /images registry API returns list of public images that conforms to limit and marker query params """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid()) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext( 200, url='/images?marker=%s&limit=1' % UUID3) self.assertEqualImages(res, (UUID2,)) def test_get_index_filter_on_user_defined_properties(self): """ Tests that /images registry API returns list of public images based a filter on user-defined properties. """ image1_id = _gen_uuid() properties = {'distro': 'ubuntu', 'arch': 'i386'} extra_fixture = self.get_fixture(id=image1_id, name='image-extra-1', properties=properties) db_api.image_create(self.context, extra_fixture) image2_id = _gen_uuid() properties = {'distro': 'ubuntu', 'arch': 'x86_64', 'foo': 'bar'} extra_fixture = self.get_fixture(id=image2_id, name='image-extra-2', properties=properties) db_api.image_create(self.context, extra_fixture) # Test index with filter containing one user-defined property. # Filter is 'property-distro=ubuntu'. 
# Verify both image1 and image2 are returned res = self.get_api_response_ext(200, url='/images?' 'property-distro=ubuntu') images = jsonutils.loads(res.body)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image1_id, images[1]['id']) # Test index with filter containing one user-defined property but # non-existent value. Filter is 'property-distro=fedora'. # Verify neither images are returned res = self.get_api_response_ext(200, url='/images?' 'property-distro=fedora') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing one user-defined property but # unique value. Filter is 'property-arch=i386'. # Verify only image1 is returned. res = self.get_api_response_ext(200, url='/images?' 'property-arch=i386') images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image1_id, images[0]['id']) # Test index with filter containing one user-defined property but # unique value. Filter is 'property-arch=x86_64'. # Verify only image1 is returned. res = self.get_api_response_ext(200, url='/images?' 'property-arch=x86_64') images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing unique user-defined property. # Filter is 'property-foo=bar'. # Verify only image2 is returned. res = self.get_api_response_ext(200, url='/images?property-foo=bar') images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing unique user-defined property but # .value is non-existent. Filter is 'property-foo=baz'. # Verify neither images are returned. 
res = self.get_api_response_ext(200, url='/images?property-foo=baz') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties # Filter is 'property-arch=x86_64&property-distro=ubuntu'. # Verify only image2 is returned. res = self.get_api_response_ext(200, url='/images?' 'property-arch=x86_64&' 'property-distro=ubuntu') images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Test index with filter containing multiple user-defined properties # Filter is 'property-arch=i386&property-distro=ubuntu'. # Verify only image1 is returned. res = self.get_api_response_ext(200, url='/images?property-arch=i386&' 'property-distro=ubuntu') images = jsonutils.loads(res.body)['images'] self.assertEqual(1, len(images)) self.assertEqual(image1_id, images[0]['id']) # Test index with filter containing multiple user-defined properties. # Filter is 'property-arch=random&property-distro=ubuntu'. # Verify neither images are returned. res = self.get_api_response_ext(200, url='/images?' 'property-arch=random&' 'property-distro=ubuntu') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-arch=random&property-distro=random'. # Verify neither images are returned. res = self.get_api_response_ext(200, url='/images?' 'property-arch=random&' 'property-distro=random') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-boo=far&property-poo=far'. # Verify neither images are returned. 
res = self.get_api_response_ext(200, url='/images?property-boo=far&' 'property-poo=far') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) # Test index with filter containing multiple user-defined properties. # Filter is 'property-foo=bar&property-poo=far'. # Verify neither images are returned. res = self.get_api_response_ext(200, url='/images?property-foo=bar&' 'property-poo=far') images = jsonutils.loads(res.body)['images'] self.assertEqual(0, len(images)) def test_get_index_filter_name(self): """ Tests that the /images registry API returns list of public images that have a specific name. This is really a sanity check, filtering is tested more in-depth using /images/detail """ extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123', size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images?name=new name! #123') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertEqual('new name! 
#123', image['name']) def test_get_index_sort_default_created_at_desc(self): """ Tests that the /images registry API returns list of public images that conforms to a default sort key/dir """ time1 = timeutils.utcnow() + datetime.timedelta(seconds=5) time2 = timeutils.utcnow() + datetime.timedelta(seconds=4) time3 = timeutils.utcnow() UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=time1) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=time2) db_api.image_create(self.context, extra_fixture) UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, created_at=time3) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images') self.assertEqualImages(res, (UUID3, UUID4, UUID5, UUID2)) def test_get_index_bad_sort_key(self): """Ensure a 400 is returned when a bad sort_key is provided.""" self.get_api_response_ext(400, url='/images?sort_key=asdf') def test_get_index_bad_sort_dir(self): """Ensure a 400 is returned when a bad sort_dir is provided.""" self.get_api_response_ext(400, url='/images?sort_dir=asdf') def test_get_index_null_name(self): """Check 200 is returned when sort_key is null name Check 200 is returned when sort_key is name and name is null for specified marker """ UUID6 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID6, name=None) db_api.image_create(self.context, extra_fixture) self.get_api_response_ext( 200, url='/images?sort_key=name&marker=%s' % UUID6) def test_get_index_null_disk_format(self): """Check 200 is returned when sort_key is null disk_format Check 200 is returned when sort_key is disk_format and disk_format is null for specified marker """ UUID6 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID6, disk_format=None, size=19) db_api.image_create(self.context, extra_fixture) self.get_api_response_ext( 200, url='/images?sort_key=disk_format&marker=%s' % UUID6) def 
test_get_index_null_container_format(self): """Check 200 is returned when sort_key is null container_format Check 200 is returned when sort_key is container_format and container_format is null for specified marker """ UUID6 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID6, container_format=None) db_api.image_create(self.context, extra_fixture) self.get_api_response_ext( 200, url='/images?sort_key=container_format&marker=%s' % UUID6) def test_get_index_sort_name_asc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by name in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz') db_api.image_create(self.context, extra_fixture) url = '/images?sort_key=name&sort_dir=asc' res = self.get_api_response_ext(200, url=url) self.assertEqualImages(res, (UUID3, UUID2, UUID4)) def test_get_index_sort_status_desc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by status in descending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, status='queued', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images?sort_key=status&sort_dir=desc')) self.assertEqualImages(res, (UUID3, UUID4, UUID2)) def test_get_index_sort_disk_format_asc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by disk_format in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, disk_format='ami', container_format='ami', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, disk_format='vdi') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images?sort_key=disk_format&sort_dir=asc')) self.assertEqualImages(res, (UUID3, UUID4, UUID2)) def test_get_index_sort_container_format_desc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by container_format in descending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19, disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, disk_format='iso', container_format='bare') db_api.image_create(self.context, extra_fixture) url = '/images?sort_key=container_format&sort_dir=desc' res = self.get_api_response_ext(200, url=url) self.assertEqualImages(res, (UUID2, UUID4, UUID3)) def test_get_index_sort_size_asc(self): """ Tests that the /images registry API returns list of public images sorted by size in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, disk_format='ami', container_format='ami', size=100) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, disk_format='iso', container_format='bare', size=2) db_api.image_create(self.context, extra_fixture) url = '/images?sort_key=size&sort_dir=asc' res = self.get_api_response_ext(200, url=url) self.assertEqualImages(res, (UUID4, UUID2, UUID3)) def test_get_index_sort_created_at_asc(self): """ Tests that the /images registry API returns list of public images sorted by created_at in ascending order. 
""" now = timeutils.utcnow() time1 = now + datetime.timedelta(seconds=5) time2 = now UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, created_at=time1, size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=time2) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images?sort_key=created_at&sort_dir=asc')) self.assertEqualImages(res, (UUID2, UUID4, UUID3)) def test_get_index_sort_updated_at_desc(self): """ Tests that the /images registry API returns list of public images sorted by updated_at in descending order. """ now = timeutils.utcnow() time1 = now + datetime.timedelta(seconds=5) time2 = now UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=19, created_at=None, updated_at=time1) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=None, updated_at=time2) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images?sort_key=updated_at&sort_dir=desc')) self.assertEqualImages(res, (UUID3, UUID4, UUID2)) def test_get_details(self): """ Tests that the /images/detail registry API returns a mapping containing a list of detailed image information """ fixture = {'id': UUID2, 'name': 'fake image #2', 'is_public': True, 'size': 19, 'min_disk': 5, 'min_ram': 256, 'checksum': None, 'disk_format': 'vhd', 'container_format': 'ovf', 'status': 'active'} res = self.get_api_response_ext(200, url='/images/detail') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for k, v in six.iteritems(fixture): self.assertEqual(v, images[0][k]) def test_get_details_limit_marker(self): """ Tests that the /images/details registry API returns list of public images that conforms to limit and marker query params. 
This functionality is tested more thoroughly on /images, this is just a sanity check """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=20) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid()) db_api.image_create(self.context, extra_fixture) url = '/images/detail?marker=%s&limit=1' % UUID3 res = self.get_api_response_ext(200, url=url) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) # expect list to be sorted by created_at desc self.assertEqual(UUID2, images[0]['id']) def test_get_details_invalid_marker(self): """ Tests that the /images/detail registry API returns a 400 when an invalid marker is provided """ url = '/images/detail?marker=%s' % _gen_uuid() self.get_api_response_ext(400, url=url) def test_get_details_malformed_marker(self): """ Tests that the /images/detail registry API returns a 400 when a malformed marker is provided """ res = self.get_api_response_ext(400, url='/images/detail?marker=4') self.assertIn(b'marker', res.body) def test_get_details_forbidden_marker(self): """ Tests that the /images/detail registry API returns a 400 when a forbidden marker is provided """ test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(400, api=api, url='/images/detail?marker=%s' % UUID1) def test_get_details_filter_name(self): """ Tests that the /images/detail registry API returns list of public images that have a specific name """ extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123', size=20) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') db_api.image_create(self.context, extra_fixture) url = '/images/detail?name=new name! 
#123' res = self.get_api_response_ext(200, url=url) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertEqual('new name! #123', image['name']) def test_get_details_filter_status(self): """ Tests that the /images/detail registry API returns list of public images that have a specific status """ extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, status='active') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?status=saving') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for image in images: self.assertEqual('saving', image['status']) def test_get_details_filter_container_format(self): """ Tests that the /images/detail registry API returns list of public images that have a specific container_format """ extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='vdi', size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami', size=19) db_api.image_create(self.context, extra_fixture) url = '/images/detail?container_format=ovf' res = self.get_api_response_ext(200, url=url) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertEqual('ovf', image['container_format']) def test_get_details_filter_min_disk(self): """ Tests that the /images/detail registry API returns list of public images that have a specific min_disk """ extra_fixture = self.get_fixture(id=_gen_uuid(), min_disk=7, size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami', size=19) db_api.image_create(self.context, extra_fixture) res = 
self.get_api_response_ext(200, url='/images/detail?min_disk=7') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for image in images: self.assertEqual(7, image['min_disk']) def test_get_details_filter_min_ram(self): """ Tests that the /images/detail registry API returns list of public images that have a specific min_ram """ extra_fixture = self.get_fixture(id=_gen_uuid(), min_ram=514, size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami', size=19) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?min_ram=514') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for image in images: self.assertEqual(514, image['min_ram']) def test_get_details_filter_disk_format(self): """ Tests that the /images/detail registry API returns list of public images that have a specific disk_format """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=19) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami', size=19) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?disk_format=vhd') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertEqual('vhd', image['disk_format']) def test_get_details_filter_size_min(self): """ Tests that the /images/detail registry API returns list of public images that have a size greater than or equal to size_min """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, 
url='/images/detail?size_min=19') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertGreaterEqual(image['size'], 19) def test_get_details_filter_size_max(self): """ Tests that the /images/detail registry API returns list of public images that have a size less than or equal to size_max """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?size_max=19') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertLessEqual(image['size'], 19) def test_get_details_filter_size_min_max(self): """ Tests that the /images/detail registry API returns list of public images that have a size less than or equal to size_max and greater than or equal to size_min """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=18) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), size=6) db_api.image_create(self.context, extra_fixture) url = '/images/detail?size_min=18&size_max=19' res = self.get_api_response_ext(200, url=url) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertTrue(18 <= image['size'] <= 19) def test_get_details_filter_changes_since(self): """ Tests that the /images/detail registry API returns list of images that changed since the time defined by changes-since """ dt1 = timeutils.utcnow() - datetime.timedelta(1) iso1 = timeutils.isotime(dt1) date_only1 = dt1.strftime('%Y-%m-%d') date_only2 = dt1.strftime('%Y%m%d') 
date_only3 = dt1.strftime('%Y-%m%d') dt2 = timeutils.utcnow() + datetime.timedelta(1) iso2 = timeutils.isotime(dt2) image_ts = timeutils.utcnow() + datetime.timedelta(2) hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00') hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00') dt4 = timeutils.utcnow() + datetime.timedelta(3) iso4 = timeutils.isotime(dt4) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, size=18) db_api.image_create(self.context, extra_fixture) db_api.image_destroy(self.context, UUID3) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, disk_format='ami', container_format='ami', created_at=image_ts, updated_at=image_ts) db_api.image_create(self.context, extra_fixture) # Check a standard list, 4 images in db (2 deleted) res = self.get_api_response_ext(200, url='/images/detail') self.assertEqualImages(res, (UUID4, UUID2)) # Expect 3 images (1 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % iso1)) self.assertEqualImages(res, (UUID4, UUID3, UUID2)) # Expect 1 images (0 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % iso2)) self.assertEqualImages(res, (UUID4,)) # Expect 1 images (0 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % hour_before)) self.assertEqualImages(res, (UUID4,)) # Expect 0 images (0 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % hour_after)) self.assertEqualImages(res, ()) # Expect 0 images (0 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % iso4)) self.assertEqualImages(res, ()) for param in [date_only1, date_only2, date_only3]: # Expect 3 images (1 deleted) res = self.get_api_response_ext(200, url=( '/images/detail?changes-since=%s' % param)) self.assertEqualImages(res, (UUID4, UUID3, UUID2)) # Bad request (empty changes-since param) self.get_api_response_ext(400, url='/images/detail?changes-since=') 
def test_get_details_filter_property(self): """ Tests that the /images/detail registry API returns list of public images that have a specific custom property """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, properties={'prop_123': 'v a'}) db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), size=19, disk_format='ami', container_format='ami', properties={'prop_123': 'v b'}) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images/detail?property-prop_123=v%20a')) res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for image in images: self.assertEqual('v a', image['properties']['prop_123']) def test_get_details_filter_public_none(self): """ Tests that the /images/detail registry API returns list of all images if is_public none is passed """ extra_fixture = self.get_fixture(id=_gen_uuid(), is_public=False, size=18) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?is_public=None') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(3, len(images)) def test_get_details_filter_public_false(self): """ Tests that the /images/detail registry API returns list of private images if is_public false is passed """ extra_fixture = self.get_fixture(id=_gen_uuid(), is_public=False, size=18) db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?is_public=False') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(2, len(images)) for image in images: self.assertEqual(False, image['is_public']) def test_get_details_filter_public_true(self): """ Tests that the /images/detail registry API returns list of public images if is_public true is passed (same as default) """ extra_fixture = self.get_fixture(id=_gen_uuid(), is_public=False, size=18) db_api.image_create(self.context, 
extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?is_public=True') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) for image in images: self.assertTrue(image['is_public']) def test_get_details_filter_public_string_format(self): """ Tests that the /images/detail registry API returns 400 Bad error for filter is_public with wrong format """ extra_fixture = self.get_fixture(id=_gen_uuid(), is_public='true', size=18) db_api.image_create(self.context, extra_fixture) self.get_api_response_ext(400, url='/images/detail?is_public=public') def test_get_details_filter_deleted_false(self): """ Test that the /images/detail registry API return list of images with deleted filter = false """ extra_fixture = {'id': _gen_uuid(), 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'name': 'test deleted filter 1', 'size': 18, 'deleted': False, 'checksum': None} db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url='/images/detail?deleted=False') res_dict = jsonutils.loads(res.body) images = res_dict['images'] for image in images: self.assertFalse(image['deleted']) def test_get_filter_no_public_with_no_admin(self): """ Tests that the /images/detail registry API returns list of public images if is_public true is passed (same as default) """ UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, is_public=False, size=18) db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) res = self.get_api_response_ext(200, api=api, url='/images/detail?is_public=False') res_dict = jsonutils.loads(res.body) images = res_dict['images'] self.assertEqual(1, len(images)) # Check that for non admin user only is_public = True images returns for image in images: self.assertTrue(image['is_public']) def test_get_filter_protected_with_None_value(self): """ Tests that the /images/detail 
registry API returns 400 error """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=18, protected="False") db_api.image_create(self.context, extra_fixture) self.get_api_response_ext(400, url='/images/detail?protected=') def test_get_filter_protected_with_True_value(self): """ Tests that the /images/detail registry API returns 400 error """ extra_fixture = self.get_fixture(id=_gen_uuid(), size=18, protected="True") db_api.image_create(self.context, extra_fixture) self.get_api_response_ext(200, url='/images/detail?protected=True') def test_get_details_sort_name_asc(self): """ Tests that the /images/details registry API returns list of public images sorted alphabetically by name in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', size=19) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz') db_api.image_create(self.context, extra_fixture) res = self.get_api_response_ext(200, url=( '/images/detail?sort_key=name&sort_dir=asc')) self.assertEqualImages(res, (UUID3, UUID2, UUID4)) def test_create_image(self): """Tests that the /images POST registry API creates the image""" fixture = self.get_minimal_fixture() body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, body=body, method='POST', content_type='json') res_dict = jsonutils.loads(res.body) for k, v in six.iteritems(fixture): self.assertEqual(v, res_dict['image'][k]) # Test status was updated properly self.assertEqual('active', res_dict['image']['status']) def test_create_image_with_min_disk(self): """Tests that the /images POST registry API creates the image""" fixture = self.get_minimal_fixture(min_disk=5) body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, body=body, method='POST', content_type='json') res_dict = jsonutils.loads(res.body) self.assertEqual(5, res_dict['image']['min_disk']) def 
test_create_image_with_min_ram(self): """Tests that the /images POST registry API creates the image""" fixture = self.get_minimal_fixture(min_ram=256) body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, body=body, method='POST', content_type='json') res_dict = jsonutils.loads(res.body) self.assertEqual(256, res_dict['image']['min_ram']) def test_create_image_with_min_ram_default(self): """Tests that the /images POST registry API creates the image""" fixture = self.get_minimal_fixture() body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, body=body, method='POST', content_type='json') res_dict = jsonutils.loads(res.body) self.assertEqual(0, res_dict['image']['min_ram']) def test_create_image_with_min_disk_default(self): """Tests that the /images POST registry API creates the image""" fixture = self.get_minimal_fixture() body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, body=body, method='POST', content_type='json') res_dict = jsonutils.loads(res.body) self.assertEqual(0, res_dict['image']['min_disk']) def test_create_image_with_bad_status(self): """Tests proper exception is raised if a bad status is set""" fixture = self.get_minimal_fixture(id=_gen_uuid(), status='bad status') body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(400, body=body, method='POST', content_type='json') self.assertIn(b'Invalid image status', res.body) def test_create_image_with_bad_id(self): """Tests proper exception is raised if a bad disk_format is set""" fixture = self.get_minimal_fixture(id='asdf') body = jsonutils.dump_as_bytes(dict(image=fixture)) self.get_api_response_ext(400, content_type='json', method='POST', body=body) def test_create_image_with_image_id_in_log(self): """Tests correct image id in log message when creating image""" fixture = self.get_minimal_fixture( id='0564c64c-3545-4e34-abfb-9d18e5f2f2f9') self.log_image_id = False def 
fake_log_info(msg, image_data): if ('0564c64c-3545-4e34-abfb-9d18e5f2f2f9' == image_data['id'] and 'Successfully created image' in msg): self.log_image_id = True self.stubs.Set(rserver.images.LOG, 'info', fake_log_info) body = jsonutils.dump_as_bytes(dict(image=fixture)) self.get_api_response_ext(200, content_type='json', method='POST', body=body) self.assertTrue(self.log_image_id) def test_update_image(self): """Tests that the /images PUT registry API updates the image""" fixture = {'name': 'fake public image #2', 'min_disk': 5, 'min_ram': 256, 'disk_format': 'raw'} body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(200, url='/images/%s' % UUID2, body=body, method='PUT', content_type='json') res_dict = jsonutils.loads(res.body) self.assertNotEqual(res_dict['image']['created_at'], res_dict['image']['updated_at']) for k, v in six.iteritems(fixture): self.assertEqual(v, res_dict['image'][k]) @mock.patch.object(rserver.images.LOG, 'debug') def test_update_image_not_log_sensitive_info(self, log_debug): """ Tests that there is no any sensitive info of image location was logged in glance during the image update operation. 
""" def fake_log_debug(fmt_str, image_meta): self.assertNotIn("'locations'", fmt_str % image_meta) fixture = {'name': 'fake public image #2', 'min_disk': 5, 'min_ram': 256, 'disk_format': 'raw', 'location': 'fake://image'} body = jsonutils.dump_as_bytes(dict(image=fixture)) log_debug.side_effect = fake_log_debug res = self.get_api_response_ext(200, url='/images/%s' % UUID2, body=body, method='PUT', content_type='json') res_dict = jsonutils.loads(res.body) self.assertNotEqual(res_dict['image']['created_at'], res_dict['image']['updated_at']) for k, v in six.iteritems(fixture): self.assertEqual(v, res_dict['image'][k]) def test_update_image_not_existing(self): """ Tests proper exception is raised if attempt to update non-existing image """ fixture = {'status': 'killed'} body = jsonutils.dump_as_bytes(dict(image=fixture)) self.get_api_response_ext(404, url='/images/%s' % _gen_uuid(), method='PUT', body=body, content_type='json') def test_update_image_with_bad_status(self): """Tests that exception raised trying to set a bad status""" fixture = {'status': 'invalid'} body = jsonutils.dump_as_bytes(dict(image=fixture)) res = self.get_api_response_ext(400, method='PUT', body=body, url='/images/%s' % UUID2, content_type='json') self.assertIn(b'Invalid image status', res.body) def test_update_private_image_no_admin(self): """ Tests proper exception is raised if attempt to update private image with non admin user, that not belongs to it """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, is_public=False, protected=True, owner='test user') db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) body = jsonutils.dump_as_bytes(dict(image=extra_fixture)) self.get_api_response_ext(404, body=body, api=api, url='/images/%s' % UUID8, method='PUT', content_type='json') def test_delete_image(self): """Tests that the /images DELETE registry API deletes the image""" # 
Grab the original number of images res = self.get_api_response_ext(200) res_dict = jsonutils.loads(res.body) orig_num_images = len(res_dict['images']) # Delete image #2 self.get_api_response_ext(200, url='/images/%s' % UUID2, method='DELETE') # Verify one less image res = self.get_api_response_ext(200) res_dict = jsonutils.loads(res.body) new_num_images = len(res_dict['images']) self.assertEqual(orig_num_images - 1, new_num_images) def test_delete_image_response(self): """Tests that the registry API delete returns the image metadata""" image = self.FIXTURES[0] res = self.get_api_response_ext(200, url='/images/%s' % image['id'], method='DELETE') deleted_image = jsonutils.loads(res.body)['image'] self.assertEqual(image['id'], deleted_image['id']) self.assertTrue(deleted_image['deleted']) self.assertTrue(deleted_image['deleted_at']) def test_delete_image_not_existing(self): """ Tests proper exception is raised if attempt to delete non-existing image """ self.get_api_response_ext(404, url='/images/%s' % _gen_uuid(), method='DELETE') def test_delete_public_image_no_admin(self): """ Tests proper exception is raised if attempt to delete public image with non admin user """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=True, owner='test user') db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(403, url='/images/%s' % UUID8, method='DELETE', api=api) def test_delete_private_image_no_admin(self): """ Tests proper exception is raised if attempt to delete private image with non admin user, that not belongs to it """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, is_public=False, size=19, protected=True, owner='test user') db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(404, 
url='/images/%s' % UUID8, method='DELETE', api=api) def test_get_image_members(self): """ Tests members listing for existing images """ res = self.get_api_response_ext(200, url='/images/%s/members' % UUID2, method='GET') memb_list = jsonutils.loads(res.body) num_members = len(memb_list['members']) self.assertEqual(0, num_members) def test_get_image_members_not_existing(self): """ Tests proper exception is raised if attempt to get members of non-existing image """ self.get_api_response_ext(404, method='GET', url='/images/%s/members' % _gen_uuid()) def test_get_image_members_forbidden(self): """ Tests proper exception is raised if attempt to get members of non-existing image """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, is_public=False, size=19, protected=True, owner='test user') db_api.image_create(self.context, extra_fixture) test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) self.get_api_response_ext(404, url='/images/%s/members' % UUID8, method='GET', api=api) def test_get_member_images(self): """ Tests image listing for members """ res = self.get_api_response_ext(200, url='/shared-images/pattieblack', method='GET') memb_list = jsonutils.loads(res.body) num_members = len(memb_list['shared_images']) self.assertEqual(0, num_members) def test_replace_members(self): """ Tests replacing image members raises right exception """ self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=False) fixture = dict(member_id='pattieblack') body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) self.get_api_response_ext(401, method='PUT', body=body, url='/images/%s/members' % UUID2, content_type='json') def test_update_all_image_members_non_existing_image_id(self): """ Test update image members raises right exception """ # Update all image members fixture = dict(member_id='test1') req = webob.Request.blank('/images/%s/members' % _gen_uuid()) req.method = 'PUT' self.context.tenant = 
'test2' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) res = req.get_response(self.api) self.assertEqual(404, res.status_int) def test_update_all_image_members_invalid_membership_association(self): """ Test update image members raises right exception """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) # Add several members to image req = webob.Request.blank('/images/%s/members/test1' % UUID8) req.method = 'PUT' res = req.get_response(self.api) # Get all image members: res = self.get_api_response_ext(200, url='/images/%s/members' % UUID8, method='GET') memb_list = jsonutils.loads(res.body) num_members = len(memb_list['members']) self.assertEqual(1, num_members) fixture = dict(member_id='test1') body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) self.get_api_response_ext(400, url='/images/%s/members' % UUID8, method='PUT', body=body, content_type='json') def test_update_all_image_members_non_shared_image_forbidden(self): """ Test update image members raises right exception """ test_rserv = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware(test_rserv, is_admin=False) UUID9 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) db_api.image_create(self.context, extra_fixture) fixture = dict(member_id='test1') req = webob.Request.blank('/images/%s/members' % UUID9) req.headers['X-Auth-Token'] = 'test1:test1:' req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(image_memberships=fixture)) res = req.get_response(api) self.assertEqual(403, res.status_int) def test_update_all_image_members(self): """ Test update non existing image members """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) # Add several 
members to image req = webob.Request.blank('/images/%s/members/test1' % UUID8) req.method = 'PUT' req.get_response(self.api) fixture = [dict(member_id='test2', can_share=True)] body = jsonutils.dump_as_bytes(dict(memberships=fixture)) self.get_api_response_ext(204, url='/images/%s/members' % UUID8, method='PUT', body=body, content_type='json') def test_update_all_image_members_bad_request(self): """ Test that right exception is raises in case if wrong memberships association is supplied """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) # Add several members to image req = webob.Request.blank('/images/%s/members/test1' % UUID8) req.method = 'PUT' req.get_response(self.api) fixture = dict(member_id='test3') body = jsonutils.dump_as_bytes(dict(memberships=fixture)) self.get_api_response_ext(400, url='/images/%s/members' % UUID8, method='PUT', body=body, content_type='json') def test_update_all_image_existing_members(self): """ Test update existing image members """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) # Add several members to image req = webob.Request.blank('/images/%s/members/test1' % UUID8) req.method = 'PUT' req.get_response(self.api) fixture = [dict(member_id='test1', can_share=False)] body = jsonutils.dump_as_bytes(dict(memberships=fixture)) self.get_api_response_ext(204, url='/images/%s/members' % UUID8, method='PUT', body=body, content_type='json') def test_update_all_image_existing_deleted_members(self): """ Test update existing image members """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) # Add a new member to an image req = webob.Request.blank('/images/%s/members/test1' % UUID8) req.method = 'PUT' 
req.get_response(self.api) # Delete the existing member self.get_api_response_ext(204, method='DELETE', url='/images/%s/members/test1' % UUID8) # Re-add the deleted member by replacing membership list fixture = [dict(member_id='test1', can_share=False)] body = jsonutils.dump_as_bytes(dict(memberships=fixture)) self.get_api_response_ext(204, url='/images/%s/members' % UUID8, method='PUT', body=body, content_type='json') memb_list = db_api.image_member_find(self.context, image_id=UUID8) self.assertEqual(1, len(memb_list)) def test_add_member(self): """ Tests adding image members raises right exception """ self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=False) self.get_api_response_ext(401, method='PUT', url=('/images/%s/members/pattieblack' % UUID2)) def test_add_member_to_image_positive(self): """ Test check that member can be successfully added """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) fixture = dict(can_share=True) test_uri = '/images/%s/members/test_add_member_positive' body = jsonutils.dump_as_bytes(dict(member=fixture)) self.get_api_response_ext(204, url=test_uri % UUID8, method='PUT', body=body, content_type='json') def test_add_member_to_non_exist_image(self): """ Test check that member can't be added for non exist image """ fixture = dict(can_share=True) test_uri = '/images/%s/members/test_add_member_positive' body = jsonutils.dump_as_bytes(dict(member=fixture)) self.get_api_response_ext(404, url=test_uri % _gen_uuid(), method='PUT', body=body, content_type='json') def test_add_image_member_non_shared_image_forbidden(self): """ Test update image members raises right exception """ test_rserver_api = rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware( test_rserver_api, is_admin=False) UUID9 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) db_api.image_create(self.context, 
extra_fixture) fixture = dict(can_share=True) test_uri = '/images/%s/members/test_add_member_to_non_share_image' req = webob.Request.blank(test_uri % UUID9) req.headers['X-Auth-Token'] = 'test1:test1:' req.method = 'PUT' req.content_type = 'application/json' req.body = jsonutils.dump_as_bytes(dict(member=fixture)) res = req.get_response(api) self.assertEqual(403, res.status_int) def test_add_member_to_image_bad_request(self): """ Test check right status code is returned """ UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) fixture = [dict(can_share=True)] test_uri = '/images/%s/members/test_add_member_bad_request' body = jsonutils.dump_as_bytes(dict(member=fixture)) self.get_api_response_ext(400, url=test_uri % UUID8, method='PUT', body=body, content_type='json') def test_delete_member(self): """ Tests deleting image members raises right exception """ self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=False) self.get_api_response_ext(401, method='DELETE', url=('/images/%s/members/pattieblack' % UUID2)) def test_delete_member_invalid(self): """ Tests deleting a invalid/non existing member raises right exception """ self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=True) res = self.get_api_response_ext(404, method='DELETE', url=('/images/%s/members/pattieblack' % UUID2)) self.assertIn(b'Membership could not be found', res.body) def test_delete_member_from_non_exist_image(self): """ Tests deleting image members raises right exception """ test_rserver_api = rserver.API(self.mapper) self.api = test_utils.FakeAuthMiddleware( test_rserver_api, is_admin=True) test_uri = '/images/%s/members/pattieblack' self.get_api_response_ext(404, method='DELETE', url=test_uri % _gen_uuid()) def test_delete_image_member_non_shared_image_forbidden(self): """ Test delete image members raises right exception """ test_rserver_api = 
rserver.API(self.mapper) api = test_utils.FakeAuthMiddleware( test_rserver_api, is_admin=False) UUID9 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID9, size=19, protected=False) db_api.image_create(self.context, extra_fixture) test_uri = '/images/%s/members/test_add_member_to_non_share_image' req = webob.Request.blank(test_uri % UUID9) req.headers['X-Auth-Token'] = 'test1:test1:' req.method = 'DELETE' req.content_type = 'application/json' res = req.get_response(api) self.assertEqual(403, res.status_int) def test_add_member_delete_create(self): """ Test check that the same member can be successfully added after delete it, and the same record will be reused for the same membership. """ # add a member UUID8 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID8, size=19, protected=False, owner='test user') db_api.image_create(self.context, extra_fixture) fixture = dict(can_share=True) test_uri = '/images/%s/members/test_add_member_delete_create' body = jsonutils.dump_as_bytes(dict(member=fixture)) self.get_api_response_ext(204, url=test_uri % UUID8, method='PUT', body=body, content_type='json') memb_list = db_api.image_member_find(self.context, image_id=UUID8) self.assertEqual(1, len(memb_list)) memb_list2 = db_api.image_member_find(self.context, image_id=UUID8, include_deleted=True) self.assertEqual(1, len(memb_list2)) # delete the member self.get_api_response_ext(204, method='DELETE', url=test_uri % UUID8) memb_list = db_api.image_member_find(self.context, image_id=UUID8) self.assertEqual(0, len(memb_list)) memb_list2 = db_api.image_member_find(self.context, image_id=UUID8, include_deleted=True) self.assertEqual(1, len(memb_list2)) # create it again self.get_api_response_ext(204, url=test_uri % UUID8, method='PUT', body=body, content_type='json') memb_list = db_api.image_member_find(self.context, image_id=UUID8) self.assertEqual(1, len(memb_list)) memb_list2 = db_api.image_member_find(self.context, image_id=UUID8, include_deleted=True) self.assertEqual(1, 
len(memb_list2)) def test_get_on_image_member(self): """ Test GET on image members raises 405 and produces correct Allow headers """ self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=False) uri = '/images/%s/members/123' % UUID1 req = webob.Request.blank(uri) req.method = 'GET' res = req.get_response(self.api) self.assertEqual(405, res.status_int) self.assertIn(('Allow', 'PUT, DELETE'), res.headerlist) def test_get_images_bad_urls(self): """Check that routes collections are not on (LP bug 1185828)""" self.get_api_response_ext(404, url='/images/detail.xxx') self.get_api_response_ext(404, url='/images.xxx') self.get_api_response_ext(404, url='/images/new') self.get_api_response_ext(200, url='/images/%s/members' % UUID1) self.get_api_response_ext(404, url='/images/%s/members.xxx' % UUID1) class TestRegistryAPILocations(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): def setUp(self): """Establish a clean test environment""" super(TestRegistryAPILocations, self).setUp() self.mapper = routes.Mapper() self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper), is_admin=True) def _get_extra_fixture(id, name, **kwargs): return self.get_extra_fixture( id, name, locations=[{'url': "file:///%s/%s" % (self.test_dir, id), 'metadata': {}, 'status': 'active'}], **kwargs) self.FIXTURES = [ _get_extra_fixture(UUID1, 'fake image #1', is_public=False, disk_format='ami', container_format='ami', min_disk=0, min_ram=0, owner=123, size=13, properties={'type': 'kernel'}), _get_extra_fixture(UUID2, 'fake image #2', min_disk=5, min_ram=256, size=19, properties={})] self.context = context.RequestContext(is_admin=True) db_api.get_engine() self.destroy_fixtures() self.create_fixtures() def tearDown(self): """Clear the test environment""" super(TestRegistryAPILocations, self).tearDown() self.destroy_fixtures() def test_show_from_locations(self): req = webob.Request.blank('/images/%s' % UUID1) res = req.get_response(self.api) self.assertEqual(200, 
res.status_int) res_dict = jsonutils.loads(res.body) image = res_dict['image'] self.assertIn('id', image['location_data'][0]) image['location_data'][0].pop('id') self.assertEqual(self.FIXTURES[0]['locations'][0], image['location_data'][0]) self.assertEqual(self.FIXTURES[0]['locations'][0]['url'], image['location_data'][0]['url']) self.assertEqual(self.FIXTURES[0]['locations'][0]['metadata'], image['location_data'][0]['metadata']) def test_show_from_location_data(self): req = webob.Request.blank('/images/%s' % UUID2) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) image = res_dict['image'] self.assertIn('id', image['location_data'][0]) image['location_data'][0].pop('id') self.assertEqual(self.FIXTURES[1]['locations'][0], image['location_data'][0]) self.assertEqual(self.FIXTURES[1]['locations'][0]['url'], image['location_data'][0]['url']) self.assertEqual(self.FIXTURES[1]['locations'][0]['metadata'], image['location_data'][0]['metadata']) def test_create_from_location_data_with_encryption(self): encryption_key = '1234567890123456' location_url1 = "file:///%s/%s" % (self.test_dir, _gen_uuid()) location_url2 = "file:///%s/%s" % (self.test_dir, _gen_uuid()) encrypted_location_url1 = crypt.urlsafe_encrypt(encryption_key, location_url1, 64) encrypted_location_url2 = crypt.urlsafe_encrypt(encryption_key, location_url2, 64) fixture = {'name': 'fake image #3', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'checksum': None, 'min_disk': 5, 'min_ram': 256, 'size': 19, 'location': encrypted_location_url1, 'location_data': [{'url': encrypted_location_url1, 'metadata': {'key': 'value'}, 'status': 'active'}, {'url': encrypted_location_url2, 'metadata': {'key': 'value'}, 'status': 'active'}]} self.config(metadata_encryption_key=encryption_key) req = webob.Request.blank('/images') req.method = 'POST' req.content_type = 'application/json' req.body = 
jsonutils.dump_as_bytes(dict(image=fixture)) res = req.get_response(self.api) self.assertEqual(200, res.status_int) res_dict = jsonutils.loads(res.body) image = res_dict['image'] # NOTE(zhiyan) _normalize_image_location_for_db() function will # not re-encrypted the url within location. self.assertEqual(fixture['location'], image['location']) self.assertEqual(2, len(image['location_data'])) self.assertEqual(fixture['location_data'][0]['url'], image['location_data'][0]['url']) self.assertEqual(fixture['location_data'][0]['metadata'], image['location_data'][0]['metadata']) self.assertEqual(fixture['location_data'][1]['url'], image['location_data'][1]['url']) self.assertEqual(fixture['location_data'][1]['metadata'], image['location_data'][1]['metadata']) image_entry = db_api.image_get(self.context, image['id']) self.assertEqual(encrypted_location_url1, image_entry['locations'][0]['url']) self.assertEqual(encrypted_location_url2, image_entry['locations'][1]['url']) decrypted_location_url1 = crypt.urlsafe_decrypt( encryption_key, image_entry['locations'][0]['url']) decrypted_location_url2 = crypt.urlsafe_decrypt( encryption_key, image_entry['locations'][1]['url']) self.assertEqual(location_url1, decrypted_location_url1) self.assertEqual(location_url2, decrypted_location_url2) class TestSharability(test_utils.BaseTestCase): def setUp(self): super(TestSharability, self).setUp() self.setup_db() self.controller = glance.registry.api.v1.members.Controller() def setup_db(self): db_api.get_engine() db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def test_is_image_sharable_as_admin(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) ctxt2 = context.RequestContext(is_admin=True, user=TENANT2, auth_token='user:%s:admin' % TENANT2, owner_is_tenant=False) UUIDX = str(uuid.uuid4()) # We need private image 
and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) result = self.controller.is_image_sharable(ctxt2, image) self.assertTrue(result) def test_is_image_sharable_owner_can_share(self): TENANT1 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) UUIDX = str(uuid.uuid4()) # We need private image and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) result = self.controller.is_image_sharable(ctxt1, image) self.assertTrue(result) def test_is_image_sharable_non_owner_cannot_share(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, auth_token='user:%s:user' % TENANT2, owner_is_tenant=False) UUIDX = str(uuid.uuid4()) # We need private image and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) result = self.controller.is_image_sharable(ctxt2, image) self.assertFalse(result) def test_is_image_sharable_non_owner_can_share_as_image_member(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, auth_token='user:%s:user' % TENANT2, owner_is_tenant=False) UUIDX = str(uuid.uuid4()) # We need private image and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) membership = {'can_share': True, 'member': TENANT2, 
'image_id': UUIDX} db_api.image_member_create(ctxt1, membership) result = self.controller.is_image_sharable(ctxt2, image) self.assertTrue(result) def test_is_image_sharable_non_owner_as_image_member_without_sharing(self): TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) ctxt2 = context.RequestContext(is_admin=False, user=TENANT2, auth_token='user:%s:user' % TENANT2, owner_is_tenant=False) UUIDX = str(uuid.uuid4()) # We need private image and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) membership = {'can_share': False, 'member': TENANT2, 'image_id': UUIDX} db_api.image_member_create(ctxt1, membership) result = self.controller.is_image_sharable(ctxt2, image) self.assertFalse(result) def test_is_image_sharable_owner_is_none(self): TENANT1 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) ctxt2 = context.RequestContext(is_admin=False, tenant=None, auth_token='user:%s:user' % TENANT1, owner_is_tenant=True) UUIDX = str(uuid.uuid4()) # We need private image and context.owner should not match image # owner image = db_api.image_create(ctxt1, {'id': UUIDX, 'status': 'queued', 'is_public': False, 'owner': TENANT1}) result = self.controller.is_image_sharable(ctxt2, image) self.assertFalse(result) glance-12.0.0/glance/tests/unit/v1/test_registry_client.py0000664000567000056710000011047212701407047024712 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import os import uuid from mock import patch from six.moves import reload_module import testtools from glance.api.v1.images import Controller as acontroller from glance.common import client as test_client from glance.common import config from glance.common import exception from glance.common import timeutils from glance import context from glance.db.sqlalchemy import api as db_api from glance.registry.api.v1.images import Controller as rcontroller import glance.registry.client.v1.api as rapi from glance.registry.client.v1.api import client as rclient from glance.tests.unit import base from glance.tests import utils as test_utils import webob _gen_uuid = lambda: str(uuid.uuid4()) UUID1 = _gen_uuid() UUID2 = _gen_uuid() # NOTE(bcwaldon): needed to init config_dir cli opt config.parse_args(args=[]) class TestRegistryV1Client(base.IsolatedUnitTest, test_utils.RegistryAPIMixIn): """ Test proper actions made for both valid and invalid requests against a Registry service """ def setUp(self): """Establish a clean test environment""" super(TestRegistryV1Client, self).setUp() db_api.get_engine() self.context = context.RequestContext(is_admin=True) self.FIXTURES = [ self.get_fixture( id=UUID1, name='fake image #1', is_public=False, disk_format='ami', container_format='ami', size=13, location="swift://user:passwd@acct/container/obj.tar.0", properties={'type': 'kernel'}), self.get_fixture(id=UUID2, name='fake image #2', properties={}, size=19, location="file:///tmp/glance-tests/2")] self.destroy_fixtures() self.create_fixtures() self.client = 
rclient.RegistryClient("0.0.0.0") def tearDown(self): """Clear the test environment""" super(TestRegistryV1Client, self).tearDown() self.destroy_fixtures() def test_get_image_index(self): """Test correct set of public image returned""" fixture = { 'id': UUID2, 'name': 'fake image #2' } images = self.client.get_images() self.assertEqualImages(images, (UUID2,), unjsonify=False) for k, v in fixture.items(): self.assertEqual(v, images[0][k]) def test_create_image_with_null_min_disk_min_ram(self): UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, min_disk=None, min_ram=None) db_api.image_create(self.context, extra_fixture) image = self.client.get_image(UUID3) self.assertEqual(0, image["min_ram"]) self.assertEqual(0, image["min_disk"]) def test_get_index_sort_name_asc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by name in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='name', sort_dir='asc') self.assertEqualImages(images, (UUID3, UUID2, UUID4), unjsonify=False) def test_get_index_sort_status_desc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by status in descending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', status='queued') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='status', sort_dir='desc') self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) def test_get_index_sort_disk_format_asc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by disk_format in ascending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', disk_format='vdi') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='disk_format', sort_dir='asc') self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) def test_get_index_sort_container_format_desc(self): """ Tests that the /images registry API returns list of public images sorted alphabetically by container_format in descending order. """ UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', disk_format='iso', container_format='bare') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='container_format', sort_dir='desc') self.assertEqualImages(images, (UUID2, UUID4, UUID3), unjsonify=False) def test_get_index_sort_size_asc(self): """ Tests that the /images registry API returns list of public images sorted by size in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami', size=100) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='asdf', disk_format='iso', container_format='bare', size=2) db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='size', sort_dir='asc') self.assertEqualImages(images, (UUID4, UUID2, UUID3), unjsonify=False) def test_get_index_sort_created_at_asc(self): """ Tests that the /images registry API returns list of public images sorted by created_at in ascending order. """ now = timeutils.utcnow() time1 = now + datetime.timedelta(seconds=5) time2 = now UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, created_at=time1) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=time2) db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='created_at', sort_dir='asc') self.assertEqualImages(images, (UUID2, UUID4, UUID3), unjsonify=False) def test_get_index_sort_updated_at_desc(self): """ Tests that the /images registry API returns list of public images sorted by updated_at in descending order. """ now = timeutils.utcnow() time1 = now + datetime.timedelta(seconds=5) time2 = now UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, created_at=None, updated_at=time1) db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, created_at=None, updated_at=time2) db_api.image_create(self.context, extra_fixture) images = self.client.get_images(sort_key='updated_at', sort_dir='desc') self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) def test_get_image_index_marker(self): """Test correct set of images returned with marker param.""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='new name! 
#123', status='saving') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(marker=UUID4) self.assertEqualImages(images, (UUID3, UUID2), unjsonify=False) def test_get_image_index_invalid_marker(self): """Test exception is raised when marker is invalid""" self.assertRaises(exception.Invalid, self.client.get_images, marker=_gen_uuid()) def test_get_image_index_forbidden_marker(self): """Test exception is raised when marker is forbidden""" UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, owner='0123', status='saving', is_public=False) db_api.image_create(self.context, extra_fixture) def non_admin_get_images(self, context, *args, **kwargs): """Convert to non-admin context""" context.is_admin = False rcontroller.__get_images(self, context, *args, **kwargs) rcontroller.__get_images = rcontroller._get_images self.stubs.Set(rcontroller, '_get_images', non_admin_get_images) self.assertRaises(exception.Invalid, self.client.get_images, marker=UUID5) def test_get_image_index_private_marker(self): """Test exception is not raised if private non-owned marker is used""" UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, owner='1234', status='saving', is_public=False) db_api.image_create(self.context, extra_fixture) try: self.client.get_images(marker=UUID4) except Exception as e: self.fail("Unexpected exception '%s'" % e) def test_get_image_index_limit(self): """Test correct number of images returned with limit param.""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(limit=2) self.assertEqual(2, len(images)) def test_get_image_index_marker_limit(self): """Test correct 
set of images returned with marker/limit params.""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='new name! #123', status='saving') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='new name! #125', status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(marker=UUID3, limit=1) self.assertEqualImages(images, (UUID2,), unjsonify=False) def test_get_image_index_limit_None(self): """Test correct set of images returned with limit param == None.""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(limit=None) self.assertEqual(3, len(images)) def test_get_image_index_by_name(self): """ Test correct set of public, name-filtered image returned. This is just a sanity check, we test the details call more in-depth. """ extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') db_api.image_create(self.context, extra_fixture) images = self.client.get_images(filters={'name': 'new name! #123'}) self.assertEqual(1, len(images)) for image in images: self.assertEqual('new name! 
#123', image['name']) def test_get_image_details(self): """Tests that the detailed info about public images returned""" fixture = self.get_fixture(id=UUID2, name='fake image #2', properties={}, size=19) images = self.client.get_images_detailed() self.assertEqual(1, len(images)) for k, v in fixture.items(): self.assertEqual(v, images[0][k]) def test_get_image_details_marker_limit(self): """Test correct set of images returned with marker/limit params.""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, status='saving') db_api.image_create(self.context, extra_fixture) extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images_detailed(marker=UUID3, limit=1) self.assertEqualImages(images, (UUID2,), unjsonify=False) def test_get_image_details_invalid_marker(self): """Test exception is raised when marker is invalid""" self.assertRaises(exception.Invalid, self.client.get_images_detailed, marker=_gen_uuid()) def test_get_image_details_forbidden_marker(self): """Test exception is raised when marker is forbidden""" UUID5 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID5, is_public=False, owner='0123', status='saving') db_api.image_create(self.context, extra_fixture) def non_admin_get_images(self, context, *args, **kwargs): """Convert to non-admin context""" context.is_admin = False rcontroller.__get_images(self, context, *args, **kwargs) rcontroller.__get_images = rcontroller._get_images self.stubs.Set(rcontroller, '_get_images', non_admin_get_images) self.assertRaises(exception.Invalid, self.client.get_images_detailed, marker=UUID5) def test_get_image_details_private_marker(self): """Test exception is not raised if private non-owned marker is used""" UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, is_public=False, owner='1234', status='saving') db_api.image_create(self.context, extra_fixture) try: self.client.get_images_detailed(marker=UUID4) except 
Exception as e: self.fail("Unexpected exception '%s'" % e) def test_get_image_details_by_name(self): """Tests that a detailed call can be filtered by name""" extra_fixture = self.get_fixture(id=_gen_uuid(), name='new name! #123') db_api.image_create(self.context, extra_fixture) filters = {'name': 'new name! #123'} images = self.client.get_images_detailed(filters=filters) self.assertEqual(1, len(images)) for image in images: self.assertEqual('new name! #123', image['name']) def test_get_image_details_by_status(self): """Tests that a detailed call can be filtered by status""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images_detailed(filters={'status': 'saving'}) self.assertEqual(1, len(images)) for image in images: self.assertEqual('saving', image['status']) def test_get_image_details_by_container_format(self): """Tests that a detailed call can be filtered by container_format""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) filters = {'container_format': 'ovf'} images = self.client.get_images_detailed(filters=filters) self.assertEqual(2, len(images)) for image in images: self.assertEqual('ovf', image['container_format']) def test_get_image_details_by_disk_format(self): """Tests that a detailed call can be filtered by disk_format""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) filters = {'disk_format': 'vhd'} images = self.client.get_images_detailed(filters=filters) self.assertEqual(2, len(images)) for image in images: self.assertEqual('vhd', image['disk_format']) def test_get_image_details_with_maximum_size(self): """Tests that a detailed call can be filtered by size_max""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', size=21) db_api.image_create(self.context, extra_fixture) images = 
self.client.get_images_detailed(filters={'size_max': 20}) self.assertEqual(1, len(images)) for image in images: self.assertLessEqual(image['size'], 20) def test_get_image_details_with_minimum_size(self): """Tests that a detailed call can be filtered by size_min""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images_detailed(filters={'size_min': 20}) self.assertEqual(1, len(images)) for image in images: self.assertGreaterEqual(image['size'], 20) def test_get_image_details_with_changes_since(self): """Tests that a detailed call can be filtered by changes-since""" dt1 = timeutils.utcnow() - datetime.timedelta(1) iso1 = timeutils.isotime(dt1) dt2 = timeutils.utcnow() + datetime.timedelta(1) iso2 = timeutils.isotime(dt2) dt3 = timeutils.utcnow() + datetime.timedelta(2) dt4 = timeutils.utcnow() + datetime.timedelta(3) iso4 = timeutils.isotime(dt4) UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='fake image #3') db_api.image_create(self.context, extra_fixture) db_api.image_destroy(self.context, UUID3) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='fake image #4', created_at=dt3, updated_at=dt3) db_api.image_create(self.context, extra_fixture) # Check a standard list, 4 images in db (2 deleted) images = self.client.get_images_detailed(filters={}) self.assertEqualImages(images, (UUID4, UUID2), unjsonify=False) # Expect 3 images (1 deleted) filters = {'changes-since': iso1} images = self.client.get_images(filters=filters) self.assertEqualImages(images, (UUID4, UUID3, UUID2), unjsonify=False) # Expect 1 images (0 deleted) filters = {'changes-since': iso2} images = self.client.get_images_detailed(filters=filters) self.assertEqualImages(images, (UUID4,), unjsonify=False) # Expect 0 images (0 deleted) filters = {'changes-since': iso4} images = self.client.get_images(filters=filters) self.assertEqualImages(images, (), unjsonify=False) def 
test_get_image_details_with_size_min(self): """Tests that a detailed call can be filtered by size_min""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving') db_api.image_create(self.context, extra_fixture) images = self.client.get_images_detailed(filters={'size_min': 20}) self.assertEqual(1, len(images)) for image in images: self.assertGreaterEqual(image['size'], 20) def test_get_image_details_by_property(self): """Tests that a detailed call can be filtered by a property""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', properties={'p a': 'v a'}) db_api.image_create(self.context, extra_fixture) filters = {'property-p a': 'v a'} images = self.client.get_images_detailed(filters=filters) self.assertEqual(1, len(images)) for image in images: self.assertEqual('v a', image['properties']['p a']) def test_get_image_is_public_v1(self): """Tests that a detailed call can be filtered by a property""" extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving', properties={'is_public': 'avalue'}) context = copy.copy(self.context) db_api.image_create(context, extra_fixture) filters = {'property-is_public': 'avalue'} images = self.client.get_images_detailed(filters=filters) self.assertEqual(1, len(images)) for image in images: self.assertEqual('avalue', image['properties']['is_public']) def test_get_image_details_sort_disk_format_asc(self): """ Tests that a detailed call returns list of public images sorted alphabetically by disk_format in ascending order. 
""" UUID3 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID3, name='asdf', disk_format='ami', container_format='ami') db_api.image_create(self.context, extra_fixture) UUID4 = _gen_uuid() extra_fixture = self.get_fixture(id=UUID4, name='xyz', disk_format='vdi') db_api.image_create(self.context, extra_fixture) images = self.client.get_images_detailed(sort_key='disk_format', sort_dir='asc') self.assertEqualImages(images, (UUID3, UUID4, UUID2), unjsonify=False) def test_get_image(self): """Tests that the detailed info about an image returned""" fixture = self.get_fixture(id=UUID1, name='fake image #1', disk_format='ami', container_format='ami', is_public=False, size=13, properties={'type': 'kernel'}) data = self.client.get_image(UUID1) for k, v in fixture.items(): el = data[k] self.assertEqual(v, data[k], "Failed v != data[k] where v = %(v)s and " "k = %(k)s and data[k] = %(el)s" % {'v': v, 'k': k, 'el': el}) def test_get_image_non_existing(self): """Tests that NotFound is raised when getting a non-existing image""" self.assertRaises(exception.NotFound, self.client.get_image, _gen_uuid()) def test_add_image_basic(self): """Tests that we can add image metadata and returns the new id""" fixture = self.get_fixture() new_image = self.client.add_image(fixture) # Test all other attributes set data = self.client.get_image(new_image['id']) for k, v in fixture.items(): self.assertEqual(v, data[k]) # Test status was updated properly self.assertIn('status', data.keys()) self.assertEqual('active', data['status']) def test_add_image_with_properties(self): """Tests that we can add image metadata with properties""" fixture = self.get_fixture(location="file:///tmp/glance-tests/2", properties={'distro': 'Ubuntu 10.04 LTS'}) new_image = self.client.add_image(fixture) del fixture['location'] for k, v in fixture.items(): self.assertEqual(v, new_image[k]) # Test status was updated properly self.assertIn('status', new_image.keys()) self.assertEqual('active', new_image['status']) def 
test_add_image_with_location_data(self): """Tests that we can add image metadata with properties""" location = "file:///tmp/glance-tests/2" loc_meta = {'key': 'value'} fixture = self.get_fixture(location_data=[{'url': location, 'metadata': loc_meta, 'status': 'active'}], properties={'distro': 'Ubuntu 10.04 LTS'}) new_image = self.client.add_image(fixture) self.assertEqual(location, new_image['location']) self.assertEqual(location, new_image['location_data'][0]['url']) self.assertEqual(loc_meta, new_image['location_data'][0]['metadata']) def test_add_image_with_location_data_with_encryption(self): """Tests that we can add image metadata with properties and enable encryption. """ self.client.metadata_encryption_key = '1234567890123456' location = "file:///tmp/glance-tests/%d" loc_meta = {'key': 'value'} fixture = {'name': 'fake public image', 'is_public': True, 'disk_format': 'vmdk', 'container_format': 'ovf', 'size': 19, 'location_data': [{'url': location % 1, 'metadata': loc_meta, 'status': 'active'}, {'url': location % 2, 'metadata': {}, 'status': 'active'}], 'properties': {'distro': 'Ubuntu 10.04 LTS'}} new_image = self.client.add_image(fixture) self.assertEqual(location % 1, new_image['location']) self.assertEqual(2, len(new_image['location_data'])) self.assertEqual(location % 1, new_image['location_data'][0]['url']) self.assertEqual(loc_meta, new_image['location_data'][0]['metadata']) self.assertEqual(location % 2, new_image['location_data'][1]['url']) self.assertEqual({}, new_image['location_data'][1]['metadata']) self.client.metadata_encryption_key = None def test_add_image_already_exists(self): """Tests proper exception is raised if image with ID already exists""" fixture = self.get_fixture(id=UUID2, location="file:///tmp/glance-tests/2") self.assertRaises(exception.Duplicate, self.client.add_image, fixture) def test_add_image_with_bad_status(self): """Tests proper exception is raised if a bad status is set""" fixture = self.get_fixture(status='bad status', 
location="file:///tmp/glance-tests/2") self.assertRaises(exception.Invalid, self.client.add_image, fixture) def test_update_image(self): """Tests that the /images PUT registry API updates the image""" fixture = {'name': 'fake public image #2', 'disk_format': 'vmdk'} self.assertTrue(self.client.update_image(UUID2, fixture)) # Test all other attributes set data = self.client.get_image(UUID2) for k, v in fixture.items(): self.assertEqual(v, data[k]) def test_update_image_not_existing(self): """Tests non existing image update doesn't work""" fixture = self.get_fixture(status='bad status') self.assertRaises(exception.NotFound, self.client.update_image, _gen_uuid(), fixture) def test_delete_image(self): """Tests that image metadata is deleted properly""" # Grab the original number of images orig_num_images = len(self.client.get_images()) # Delete image #2 image = self.FIXTURES[1] deleted_image = self.client.delete_image(image['id']) self.assertTrue(deleted_image) self.assertEqual(image['id'], deleted_image['id']) self.assertTrue(deleted_image['deleted']) self.assertTrue(deleted_image['deleted_at']) # Verify one less image new_num_images = len(self.client.get_images()) self.assertEqual(orig_num_images - 1, new_num_images) def test_delete_image_not_existing(self): """Check that one cannot delete non-existing image.""" self.assertRaises(exception.NotFound, self.client.delete_image, _gen_uuid()) def test_get_image_members(self): """Test getting image members.""" memb_list = self.client.get_image_members(UUID2) num_members = len(memb_list) self.assertEqual(0, num_members) def test_get_image_members_not_existing(self): """Test getting non-existent image members.""" self.assertRaises(exception.NotFound, self.client.get_image_members, _gen_uuid()) def test_get_member_images(self): """Test getting member images.""" memb_list = self.client.get_member_images('pattieblack') num_members = len(memb_list) self.assertEqual(0, num_members) def test_add_replace_members(self): """Test 
replacing image members.""" self.assertTrue(self.client.add_member(UUID2, 'pattieblack')) self.assertTrue(self.client.replace_members(UUID2, dict(member_id='pattie' 'black2'))) def test_add_delete_member(self): """Tests deleting image members""" self.client.add_member(UUID2, 'pattieblack') self.assertTrue(self.client.delete_member(UUID2, 'pattieblack')) class TestBaseClient(testtools.TestCase): """ Test proper actions made for both valid and invalid requests against a Registry service """ def test_connect_kwargs_default_values(self): actual = test_client.BaseClient('127.0.0.1').get_connect_kwargs() self.assertEqual({'timeout': None}, actual) def test_connect_kwargs(self): base_client = test_client.BaseClient( host='127.0.0.1', port=80, timeout=1, use_ssl=True) actual = base_client.get_connect_kwargs() expected = {'insecure': False, 'key_file': None, 'cert_file': None, 'timeout': 1} for k in expected.keys(): self.assertEqual(expected[k], actual[k]) class TestRegistryV1ClientApi(base.IsolatedUnitTest): def setUp(self): """Establish a clean test environment.""" super(TestRegistryV1ClientApi, self).setUp() self.context = context.RequestContext() reload_module(rapi) def tearDown(self): """Clear the test environment.""" super(TestRegistryV1ClientApi, self).tearDown() def test_get_registry_client(self): actual_client = rapi.get_registry_client(self.context) self.assertIsNone(actual_client.identity_headers) def test_get_registry_client_with_identity_headers(self): self.config(send_identity_headers=True) expected_identity_headers = { 'X-User-Id': '', 'X-Tenant-Id': '', 'X-Roles': ','.join(self.context.roles), 'X-Identity-Status': 'Confirmed', 'X-Service-Catalog': 'null', } actual_client = rapi.get_registry_client(self.context) self.assertEqual(expected_identity_headers, actual_client.identity_headers) def test_configure_registry_client_not_using_use_user_token(self): self.config(use_user_token=False) with patch.object(rapi, 'configure_registry_admin_creds') as mock_rapi: 
rapi.configure_registry_client() mock_rapi.assert_called_once_with() def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'): return { 'user': 'user', 'password': 'password', 'username': 'user', 'tenant': 'tenant', 'auth_url': auth_url, 'strategy': strategy, 'region': 'region' } def test_configure_registry_admin_creds(self): expected = self._get_fake_config_creds(auth_url=None, strategy='configured_strategy') self.config(admin_user=expected['user']) self.config(admin_password=expected['password']) self.config(admin_tenant_name=expected['tenant']) self.config(auth_strategy=expected['strategy']) self.config(auth_region=expected['region']) self.stubs.Set(os, 'getenv', lambda x: None) self.assertIsNone(rapi._CLIENT_CREDS) rapi.configure_registry_admin_creds() self.assertEqual(expected, rapi._CLIENT_CREDS) def test_configure_registry_admin_creds_with_auth_url(self): expected = self._get_fake_config_creds() self.config(admin_user=expected['user']) self.config(admin_password=expected['password']) self.config(admin_tenant_name=expected['tenant']) self.config(auth_url=expected['auth_url']) self.config(auth_strategy='test_strategy') self.config(auth_region=expected['region']) self.assertIsNone(rapi._CLIENT_CREDS) rapi.configure_registry_admin_creds() self.assertEqual(expected, rapi._CLIENT_CREDS) class FakeResponse(object): status = 202 def getheader(*args, **kwargs): return None class TestRegistryV1ClientRequests(base.IsolatedUnitTest): def setUp(self): super(TestRegistryV1ClientRequests, self).setUp() def tearDown(self): super(TestRegistryV1ClientRequests, self).tearDown() def test_do_request_with_identity_headers(self): identity_headers = {'foo': 'bar'} self.client = rclient.RegistryClient("0.0.0.0", identity_headers=identity_headers) with patch.object(test_client.BaseClient, 'do_request', return_value=FakeResponse()) as mock_do_request: self.client.do_request("GET", "/images") mock_do_request.assert_called_once_with("GET", "/images", 
headers=identity_headers) def test_do_request(self): self.client = rclient.RegistryClient("0.0.0.0") with patch.object(test_client.BaseClient, 'do_request', return_value=FakeResponse()) as mock_do_request: self.client.do_request("GET", "/images") mock_do_request.assert_called_once_with("GET", "/images", headers={}) def test_registry_invalid_token_exception_handling(self): self.image_controller = acontroller() request = webob.Request.blank('/images') request.method = 'GET' request.context = context.RequestContext() with patch.object(rapi, 'get_images_detail') as mock_detail: mock_detail.side_effect = exception.NotAuthenticated() self.assertRaises(webob.exc.HTTPUnauthorized, self.image_controller.detail, request) glance-12.0.0/glance/tests/integration/0000775000567000056710000000000012701407204021077 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/legacy_functional/0000775000567000056710000000000012701407204024565 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/legacy_functional/test_v1_api.py0000664000567000056710000021630612701407051027365 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import hashlib import os import tempfile from oslo_serialization import jsonutils from oslo_utils import units import testtools from glance.common import timeutils from glance.tests.integration.legacy_functional import base from glance.tests.utils import minimal_headers FIVE_KB = 5 * units.Ki FIVE_GB = 5 * units.Gi class TestApi(base.ApiTest): def test_get_head_simple_post(self): # 0. GET /images # Verify no public images path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) # 1. GET /images/detail # Verify no public images path = "/v1/images/detail" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) # 2. POST /images with public image named Image1 # attribute and no custom properties. Verify a 200 OK is returned image_data = "*" * FIVE_KB headers = minimal_headers('Image1') path = "/v1/images" response, content = self.http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 3. HEAD image # Verify image found now path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) # 4. 
GET image # Verify all information on image we just added is correct path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) expected_image_headers = { 'x-image-meta-id': image_id, 'x-image-meta-name': 'Image1', 'x-image-meta-is_public': 'True', 'x-image-meta-status': 'active', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'x-image-meta-size': str(FIVE_KB)} expected_std_headers = { 'content-length': str(FIVE_KB), 'content-type': 'application/octet-stream'} for expected_key, expected_value in expected_image_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, response[expected_key])) for expected_key, expected_value in expected_std_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, response[expected_key])) self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) # 5. GET /images # Verify no public images path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"images": [ {"container_format": "ovf", "disk_format": "raw", "id": image_id, "name": "Image1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 6. 
GET /images/detail # Verify image and all its metadata path = "/v1/images/detail" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) expected_image = { "status": "active", "name": "Image1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": image_id, "is_public": True, "deleted_at": None, "properties": {}, "size": 5120} image = jsonutils.loads(content) for expected_key, expected_value in expected_image.items(): self.assertEqual(expected_value, image['images'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, image['images'][0][expected_key])) # 7. PUT image with custom properties of "distro" and "arch" # Verify 200 returned headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', 'X-Image-Meta-Property-Arch': 'x86_64'} path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual("x86_64", data['image']['properties']['arch']) self.assertEqual("Ubuntu", data['image']['properties']['distro']) # 8. GET /images/detail # Verify image and all its metadata path = "/v1/images/detail" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) expected_image = { "status": "active", "name": "Image1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": image_id, "is_public": True, "deleted_at": None, "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'}, "size": 5120} image = jsonutils.loads(content) for expected_key, expected_value in expected_image.items(): self.assertEqual(expected_value, image['images'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, image['images'][0][expected_key])) # 9. PUT image and remove a previously existing property. 
headers = {'X-Image-Meta-Property-Arch': 'x86_64'} path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) path = "/v1/images/detail" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['images'][0] self.assertEqual(1, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) # 10. PUT image and add a previously deleted property. headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', 'X-Image-Meta-Property-Arch': 'x86_64'} path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) path = "/v1/images/detail" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['images'][0] self.assertEqual(2, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) self.assertEqual("Ubuntu", data['properties']['distro']) self.assertNotEqual(data['created_at'], data['updated_at']) # DELETE image path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'DELETE') self.assertEqual(200, response.status) def test_queued_process_flow(self): """ We test the process flow where a user registers an image with Glance but does not immediately upload an image file. Later, the user uploads an image file using a PUT operation. We track the changing of image status throughout this process. 0. GET /images - Verify no public images 1. POST /images with public image named Image1 with no location attribute and no image data. - Verify 201 returned 2. GET /images - Verify one public image 3. HEAD image - Verify image now in queued status 4. PUT image with image data - Verify 200 returned 5. HEAD images - Verify image now in active status 6. GET /images - Verify one public image """ # 0. 
GET /images # Verify no public images path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) # 1. POST /images with public image named Image1 # with no location or image data headers = minimal_headers('Image1') path = "/v1/images" response, content = self.http.request(path, 'POST', headers=headers) self.assertEqual(201, response.status) data = jsonutils.loads(content) self.assertIsNone(data['image']['checksum']) self.assertEqual(0, data['image']['size']) self.assertEqual('ovf', data['image']['container_format']) self.assertEqual('raw', data['image']['disk_format']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) image_id = data['image']['id'] # 2. GET /images # Verify 1 public image path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual(image_id, data['images'][0]['id']) self.assertIsNone(data['images'][0]['checksum']) self.assertEqual(0, data['images'][0]['size']) self.assertEqual('ovf', data['images'][0]['container_format']) self.assertEqual('raw', data['images'][0]['disk_format']) self.assertEqual("Image1", data['images'][0]['name']) # 3. HEAD /images # Verify status is in queued path = "/v1/images/%s" % (image_id) response, content = self.http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) self.assertEqual("queued", response['x-image-meta-status']) self.assertEqual('0', response['x-image-meta-size']) self.assertEqual(image_id, response['x-image-meta-id']) # 4. 
PUT image with image data, verify 200 returned image_data = "*" * FIVE_KB headers = {'Content-Type': 'application/octet-stream'} path = "/v1/images/%s" % (image_id) response, content = self.http.request(path, 'PUT', headers=headers, body=image_data) self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 5. HEAD /images # Verify status is in active path = "/v1/images/%s" % (image_id) response, content = self.http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) self.assertEqual("active", response['x-image-meta-status']) # 6. GET /images # Verify 1 public image still... path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual(hashlib.md5(image_data).hexdigest(), data['images'][0]['checksum']) self.assertEqual(image_id, data['images'][0]['id']) self.assertEqual(FIVE_KB, data['images'][0]['size']) self.assertEqual('ovf', data['images'][0]['container_format']) self.assertEqual('raw', data['images'][0]['disk_format']) self.assertEqual("Image1", data['images'][0]['name']) # DELETE image path = "/v1/images/%s" % (image_id) response, content = self.http.request(path, 'DELETE') self.assertEqual(200, response.status) def test_v1_not_enabled(self): self.config(enable_v1_api=False) path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(300, response.status) def test_v1_enabled(self): self.config(enable_v1_api=True) path = "/v1/images" response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) def test_zero_initial_size(self): """ A test to ensure that an image with size explicitly set to zero has status that 
immediately transitions to active. """ # 1. POST /images with public image named Image1 # attribute and a size of zero. # Verify a 201 OK is returned headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Size': '0', 'X-Image-Meta-Name': 'Image1', 'X-Image-Meta-disk_format': 'raw', 'X-image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True'} path = "/v1/images" response, content = self.http.request(path, 'POST', headers=headers) self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) # 2. HEAD image-location # Verify image size is zero and the status is active path = response.get('location') response, content = self.http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('0', response['x-image-meta-size']) self.assertEqual('active', response['x-image-meta-status']) # 3. GET image-location # Verify image content is empty response, content = self.http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(0, len(content)) def test_traceback_not_consumed(self): """ A test that errors coming from the POST API do not get consumed and print the actual error message, and not something like <traceback object at 0x1918d40> :see https://bugs.launchpad.net/glance/+bug/755912 """ # POST /images with binary data, but not setting # Content-Type to application/octet-stream, verify a # 400 returned and that the error is readable. 
# NOTE(review): the next span is the tail of a test method whose ``def`` line
# lies above this chunk; it verifies that POSTing image data with a
# Content-Type other than application/octet-stream is rejected with 400.
# Indentation below is reconstructed — the original physical lines were
# collapsed during extraction.
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write("XXX")
            test_data_file.flush()
            path = "/v1/images"
            headers = minimal_headers('Image1')
            headers['Content-Type'] = 'not octet-stream'
            response, content = self.http.request(path, 'POST',
                                                  body=test_data_file.name,
                                                  headers=headers)
            self.assertEqual(400, response.status)
            expected = "Content-Type must be application/octet-stream"
            self.assertIn(expected, content,
                          "Could not find '%s' in '%s'" % (expected, content))

    def test_filtered_images(self):
        """
        Set up four test images and ensure each query param filter works
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)

        image_ids = []

        # 1. POST /images with three public images, and one private image
        # with various attributes
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'True',
                   'X-Image-Meta-Property-pants': 'are on'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are on", data['image']['properties']['pants'])
        self.assertTrue(data['image']['is_public'])
        image_ids.append(data['image']['id'])

        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Image!',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vhd',
                   'X-Image-Meta-Size': '20',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'False',
                   'X-Image-Meta-Property-pants': 'are on'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are on", data['image']['properties']['pants'])
        self.assertTrue(data['image']['is_public'])
        image_ids.append(data['image']['id'])

        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Image!',
                   'X-Image-Meta-Status': 'saving',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '21',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Protected': 'False',
                   'X-Image-Meta-Property-pants': 'are off'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("are off", data['image']['properties']['pants'])
        self.assertTrue(data['image']['is_public'])
        image_ids.append(data['image']['id'])

        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'My Private Image',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '22',
                   'X-Image-Meta-Is-Public': 'False',
                   'X-Image-Meta-Protected': 'False'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertFalse(data['image']['is_public'])
        image_ids.append(data['image']['id'])

        # 2. GET /images
        # Verify three public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))

        # 3. GET /images with name filter
        # Verify correct images returned with name
        params = "name=My%20Image!"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertEqual("My Image!", image['name'])

        # 4. GET /images with status filter
        # Verify correct images returned with status
        params = "status=queued"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))
        for image in data['images']:
            self.assertEqual("queued", image['status'])

        params = "status=active"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['images']))

        # 5. GET /images with container_format filter
        # Verify correct images returned with container_format
        params = "container_format=ovf"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertEqual("ovf", image['container_format'])

        # 6. GET /images with disk_format filter
        # Verify correct images returned with disk_format
        params = "disk_format=vdi"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['images']))
        for image in data['images']:
            self.assertEqual("vdi", image['disk_format'])

        # 7. GET /images with size_max filter
        # Verify correct images returned with size <= expected
        params = "size_max=20"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertLessEqual(image['size'], 20)

        # 8. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "size_min=20"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertGreaterEqual(image['size'], 20)

        # 9. Get /images with is_public=None filter
        # Verify correct images returned with property
        # Bug lp:803656  Support is_public in filtering
        params = "is_public=None"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(4, len(data['images']))

        # 10. Get /images with is_public=False filter
        # Verify correct images returned with property
        # Bug lp:803656  Support is_public in filtering
        params = "is_public=False"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['images']))
        for image in data['images']:
            self.assertEqual("My Private Image", image['name'])

        # 11. Get /images with is_public=True filter
        # Verify correct images returned with property
        # Bug lp:803656  Support is_public in filtering
        params = "is_public=True"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))
        for image in data['images']:
            self.assertNotEqual(image['name'], "My Private Image")

        # 12. Get /images with protected=False filter
        # Verify correct images returned with property
        params = "protected=False"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertNotEqual(image['name'], "Image1")

        # 13. Get /images with protected=True filter
        # Verify correct images returned with property
        params = "protected=True"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['images']))
        for image in data['images']:
            self.assertEqual("Image1", image['name'])

        # 14. GET /images with property filter
        # Verify correct images returned with property
        params = "property-pants=are%20on"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        for image in data['images']:
            self.assertEqual("are on", image['properties']['pants'])

        # 15. GET /images with property filter and name filter
        # Verify correct images returned with property and name
        # Make sure you quote the url when using more than one param!
        params = "name=My%20Image!&property-pants=are%20on"
        path = "/v1/images/detail?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(1, len(data['images']))
        for image in data['images']:
            self.assertEqual("are on", image['properties']['pants'])
            self.assertEqual("My Image!", image['name'])

        # 16. GET /images with past changes-since filter
        yesterday = timeutils.isotime(timeutils.utcnow() -
                                      datetime.timedelta(1))
        params = "changes-since=%s" % yesterday
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))

        # one timezone west of Greenwich equates to an hour ago
        # taking care to pre-urlencode '+' as '%2B', otherwise the timezone
        # '+' is wrongly decoded as a space
        # TODO(eglynn): investigate '+' --> decoding, an artifact
        # of WSGI/webob dispatch?
        now = timeutils.utcnow()
        hour_ago = now.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
        params = "changes-since=%s" % hour_ago
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))

        # 17. GET /images with future changes-since filter
        tomorrow = timeutils.isotime(timeutils.utcnow() +
                                     datetime.timedelta(1))
        params = "changes-since=%s" % tomorrow
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['images']))

        # one timezone east of Greenwich equates to an hour from now
        now = timeutils.utcnow()
        hour_hence = now.strftime('%Y-%m-%dT%H:%M:%S-01:00')
        params = "changes-since=%s" % hour_hence
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['images']))

        # 18. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "size_min=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("filter size_min got -1", content)

        # 19. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "size_max=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("filter size_max got -1", content)

        # 20. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "min_ram=-1"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("Bad value passed to filter min_ram got -1", content)

        # 21. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "protected=imalittleteapot"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("protected got imalittleteapot", content)

        # 22. GET /images with size_min filter
        # Verify correct images returned with size >= expected
        params = "is_public=imalittleteapot"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(400, response.status)
        self.assertIn("is_public got imalittleteapot", content)

    def test_limited_images(self):
        """
        Ensure marker and limit query params work
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)

        image_ids = []

        # 1. POST /images with three public images with various attributes
        headers = minimal_headers('Image1')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        headers = minimal_headers('Image2')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        headers = minimal_headers('Image3')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        # 2. GET /images with all images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(3, len(images))

        # 3. GET /images with limit of 2
        # Verify only two images were returned
        params = "limit=2"
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images']
        self.assertEqual(2, len(data))
        self.assertEqual(images[0]['id'], data[0]['id'])
        self.assertEqual(images[1]['id'], data[1]['id'])

        # 4. GET /images with marker
        # Verify only two images were returned
        params = "marker=%s" % images[0]['id']
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images']
        self.assertEqual(2, len(data))
        self.assertEqual(images[1]['id'], data[0]['id'])
        self.assertEqual(images[2]['id'], data[1]['id'])

        # 5. GET /images with marker and limit
        # Verify only one image was returned with the correct id
        params = "limit=1&marker=%s" % images[1]['id']
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images']
        self.assertEqual(1, len(data))
        self.assertEqual(images[2]['id'], data[0]['id'])

        # 6. GET /images/detail with marker and limit
        # Verify only one image was returned with the correct id
        params = "limit=1&marker=%s" % images[1]['id']
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images']
        self.assertEqual(1, len(data))
        self.assertEqual(images[2]['id'], data[0]['id'])

        # DELETE images
        for image_id in image_ids:
            path = "/v1/images/%s" % (image_id)
            response, content = self.http.request(path, 'DELETE')
            self.assertEqual(200, response.status)

    def test_ordered_images(self):
        """
        Set up three test images and ensure each query param filter works
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)

        # 1. POST /images with three public images with various attributes
        image_ids = []
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'ASDF',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'bare',
                   'X-Image-Meta-Disk-Format': 'iso',
                   'X-Image-Meta-Size': '2',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'XYZ',
                   'X-Image-Meta-Status': 'saving',
                   'X-Image-Meta-Container-Format': 'ami',
                   'X-Image-Meta-Disk-Format': 'ami',
                   'X-Image-Meta-Size': '5',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        image_ids.append(jsonutils.loads(content)['image']['id'])

        # 2. GET /images with no query params
        # Verify three public images sorted by created_at desc
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))
        self.assertEqual(image_ids[2], data['images'][0]['id'])
        self.assertEqual(image_ids[1], data['images'][1]['id'])
        self.assertEqual(image_ids[0], data['images'][2]['id'])

        # 3. GET /images sorted by name asc
        params = 'sort_key=name&sort_dir=asc'
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))
        self.assertEqual(image_ids[1], data['images'][0]['id'])
        self.assertEqual(image_ids[0], data['images'][1]['id'])
        self.assertEqual(image_ids[2], data['images'][2]['id'])

        # 4. GET /images sorted by size desc
        params = 'sort_key=size&sort_dir=desc'
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(3, len(data['images']))
        self.assertEqual(image_ids[0], data['images'][0]['id'])
        self.assertEqual(image_ids[2], data['images'][1]['id'])
        self.assertEqual(image_ids[1], data['images'][2]['id'])

        # 5. GET /images sorted by size desc with a marker
        params = 'sort_key=size&sort_dir=desc&marker=%s' % image_ids[0]
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(2, len(data['images']))
        self.assertEqual(image_ids[2], data['images'][0]['id'])
        self.assertEqual(image_ids[1], data['images'][1]['id'])

        # 6. GET /images sorted by name asc with a marker
        params = 'sort_key=name&sort_dir=asc&marker=%s' % image_ids[2]
        path = "/v1/images?%s" % (params)
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(0, len(data['images']))

        # DELETE images
        for image_id in image_ids:
            path = "/v1/images/%s" % (image_id)
            response, content = self.http.request(path, 'DELETE')
            self.assertEqual(200, response.status)

    def test_duplicate_image_upload(self):
        """
        Upload initial image, then attempt to upload duplicate image
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)

        # 1. POST /images with public image named Image1
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)

        image = jsonutils.loads(content)['image']

        # 2. POST /images with public image named Image1, and ID: 1
        headers = {'Content-Type': 'application/octet-stream',
                   'X-Image-Meta-Name': 'Image1 Update',
                   'X-Image-Meta-Status': 'active',
                   'X-Image-Meta-Container-Format': 'ovf',
                   'X-Image-Meta-Disk-Format': 'vdi',
                   'X-Image-Meta-Size': '19',
                   'X-Image-Meta-Id': image['id'],
                   'X-Image-Meta-Is-Public': 'True'}
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        # Reusing an existing image ID must be rejected as a conflict.
        self.assertEqual(409, response.status)

    def test_delete_not_existing(self):
        """
        We test the following:

        0. GET /images/1
        - Verify 404
        1. DELETE /images/1
        - Verify 404
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)

        # 1. DELETE /images/1
        # Verify 404 returned
        path = "/v1/images/1"
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(404, response.status)

    def _do_test_post_image_content_bad_format(self, format):
        """
        We test that missing container/disk format fails with 400 "Bad Request"

        :see https://bugs.launchpad.net/glance/+bug/933702
        """
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(0, len(images))

        path = "/v1/images"

        # POST /images without given format being specified
        headers = minimal_headers('Image1')
        headers['X-Image-Meta-' + format] = 'bad_value'
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write("XXX")
            test_data_file.flush()
            response, content = self.http.request(path, 'POST',
                                                  headers=headers,
                                                  body=test_data_file.name)
        self.assertEqual(400, response.status)
        type = format.replace('_format', '')
        expected = "Invalid %s format 'bad_value' for image" % type
        self.assertIn(expected, content,
                      "Could not find '%s' in '%s'" % (expected, content))

        # make sure the image was not created
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        images = jsonutils.loads(content)['images']
        self.assertEqual(0, len(images))

    def test_post_image_content_bad_container_format(self):
        self._do_test_post_image_content_bad_format('container_format')

    def test_post_image_content_bad_disk_format(self):
        self._do_test_post_image_content_bad_format('disk_format')

    def _do_test_put_image_content_missing_format(self, format):
        """
        We test that missing container/disk format only fails with
        400 "Bad Request" when the image content is PUT (i.e. not
        on the original POST of a queued image).

        :see https://bugs.launchpad.net/glance/+bug/937216
        """
        # POST queued image
        path = "/v1/images"
        headers = {
            'X-Image-Meta-Name': 'Image1',
            'X-Image-Meta-Is-Public': 'True',
        }
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.addDetail('image_data', testtools.content.json_content(data))

        # PUT image content images without given format being specified
        path = "/v1/images/%s" % (image_id)
        headers = minimal_headers('Image1')
        del headers['X-Image-Meta-' + format]
        with tempfile.NamedTemporaryFile() as test_data_file:
            test_data_file.write("XXX")
            test_data_file.flush()
            response, content = self.http.request(path, 'PUT',
                                                  headers=headers,
                                                  body=test_data_file.name)
        self.assertEqual(400, response.status)
        type = format.replace('_format', '').capitalize()
        expected = "%s format is not specified" % type
        self.assertIn(expected, content,
                      "Could not find '%s' in '%s'" % (expected, content))

    def test_put_image_content_bad_container_format(self):
        self._do_test_put_image_content_missing_format('container_format')

    def test_put_image_content_bad_disk_format(self):
        self._do_test_put_image_content_missing_format('disk_format')

    def _do_test_mismatched_attribute(self, attribute, value):
        """
        Test mismatched attribute.
        """
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        headers[attribute] = value
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers,
                                              body=image_data)
        self.assertEqual(400, response.status)

        # The rejected upload must not leave a partial file behind in the
        # filesystem store.
        images_dir = os.path.join(self.test_dir, 'images')
        image_count = len([name for name in os.listdir(images_dir)
                           if os.path.isfile(os.path.join(images_dir, name))])
        self.assertEqual(0, image_count)

    def test_mismatched_size(self):
        """
        Test mismatched size.
        """
        self._do_test_mismatched_attribute('x-image-meta-size',
                                           str(FIVE_KB + 1))

    def test_mismatched_checksum(self):
        """
        Test mismatched checksum.
        """
        self._do_test_mismatched_attribute('x-image-meta-checksum',
                                           'foobar')


class TestApiWithFakeAuth(base.ApiTest):
    def __init__(self, *args, **kwargs):
        super(TestApiWithFakeAuth, self).__init__(*args, **kwargs)
        # Route both services through the 'fakeauth' paste flavor so
        # X-Auth-Token headers of the form 'user:tenant:role' are honored.
        self.api_flavor = 'fakeauth'
        self.registry_flavor = 'fakeauth'

    def test_ownership(self):
        # Add an image with admin privileges and ensure the owner
        # can be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }

        create_headers = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
        }
        create_headers.update(auth_headers)

        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant2', response['x-image-meta-owner'])

        # Now add an image without admin privileges and ensure the owner
        # cannot be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        create_headers.update(auth_headers)

        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']

        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers.update(auth_headers)

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant1', response['x-image-meta-owner'])

        # Make sure the non-privileged user can't update their owner either
        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
            'X-Auth-Token': 'user1:tenant1:role1',
        }

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)

        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant1', response['x-image-meta-owner'])

        # An admin user should be able to update the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant3:admin',
        }

        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
        }
        update_headers.update(auth_headers)

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)

        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant2', response['x-image-meta-owner'])

    def test_image_visibility_to_different_users(self):
        # Create a public and a private image for each owner, then verify
        # which of the eight images each kind of caller can list.
        owners = ['admin', 'tenant1', 'tenant2', 'none']
        visibilities = {'public': 'True', 'private': 'False'}
        image_ids = {}

        for owner in owners:
            for visibility, is_public in visibilities.items():
                name = '%s-%s' % (owner, visibility)
                headers = {
                    'Content-Type': 'application/octet-stream',
                    'X-Image-Meta-Name': name,
                    'X-Image-Meta-Status': 'active',
                    'X-Image-Meta-Is-Public': is_public,
                    'X-Image-Meta-Owner': owner,
                    'X-Auth-Token': 'createuser:createtenant:admin',
                }
                path = "/v1/images"
                response, content = self.http.request(path, 'POST',
                                                      headers=headers)
                self.assertEqual(201, response.status)
                data = jsonutils.loads(content)
                image_ids[name] = data['image']['id']

        def list_images(tenant, role='', is_public=None):
            # Helper: list image details as 'user:<tenant>:<role>',
            # optionally forcing the is_public filter.
            auth_token = 'user:%s:%s' % (tenant, role)
            headers = {'X-Auth-Token': auth_token}
            path = "/v1/images/detail"
            if is_public is not None:
                path += '?is_public=%s' % is_public
            response, content = self.http.request(path, 'GET',
                                                  headers=headers)
            self.assertEqual(200, response.status)
            return jsonutils.loads(content)['images']

        # 1. Known user sees public and their own images
        images = list_images('tenant1')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or
                            image['owner'] == 'tenant1')

        # 2. Unknown user sees only public images
        images = list_images('none')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])

        # 3. Unknown admin sees only public images
        images = list_images('none', role='admin')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])

        # 4. Unknown admin, is_public=none, shows all images
        images = list_images('none', role='admin', is_public='none')
        self.assertEqual(8, len(images))

        # 5. Unknown admin, is_public=true, shows only public images
        images = list_images('none', role='admin', is_public='true')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])

        # 6. Unknown admin, is_public=false, sees only private images
        images = list_images('none', role='admin', is_public='false')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertFalse(image['is_public'])

        # 7. Known admin sees public and their own images
        images = list_images('admin', role='admin')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or
                            image['owner'] == 'admin')

        # 8. Known admin, is_public=none, shows all images
        images = list_images('admin', role='admin', is_public='none')
        self.assertEqual(8, len(images))

        # 9. Known admin, is_public=true, sees all public and their images
        images = list_images('admin', role='admin', is_public='true')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or
                            image['owner'] == 'admin')

        # 10. Known admin, is_public=false, sees all private images
        images = list_images('admin', role='admin', is_public='false')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertFalse(image['is_public'])

    def test_property_protections(self):
        # Enable property protection
        self.config(property_protection_file=self.property_file)
        self.init()

        CREATE_HEADERS = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
        }

        # Create an image for role member with extra properties
        # Raises 403 since user is not allowed to create 'foo'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:member',
        }
        custom_props = {
            'x-image-meta-property-foo': 'bar'
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(403, response.status)

        # Create an image for role member without 'foo'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:member',
        }
        custom_props = {
            'x-image-meta-property-x_owner_foo': 'o_s_bar',
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(201, response.status)

        # Returned image entity should have 'x_owner_foo'
        data = jsonutils.loads(content)
        self.assertEqual('o_s_bar',
                         data['image']['properties']['x_owner_foo'])

        # Create an image for role spl_role with extra properties
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_create_prop': 'create_bar',
            'X-Image-Meta-Property-spl_read_prop': 'read_bar',
            'X-Image-Meta-Property-spl_update_prop': 'update_bar',
            'X-Image-Meta-Property-spl_delete_prop': 'delete_bar'
        }
        auth_headers.update(custom_props)
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']

        # Attempt to update two properties, one protected(spl_read_prop), the
        # other not(spl_update_prop).  Request should be forbidden.
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_read_prop': 'r',
            'X-Image-Meta-Property-spl_update_prop': 'u',
            'X-Glance-Registry-Purge-Props': 'False'
        }
        # NOTE(review): updating auth_headers with itself is a no-op —
        # presumably a leftover; custom_props below carries the real payload.
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(403, response.status)

        # Attempt to create properties which are forbidden
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_new_prop': 'new',
            'X-Glance-Registry-Purge-Props': 'True'
        }
        # NOTE(review): self-update is a no-op (see above).
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(403, response.status)

        # Attempt to update, create and delete properties
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        custom_props = {
            'X-Image-Meta-Property-spl_create_prop': 'create_bar',
            'X-Image-Meta-Property-spl_read_prop': 'read_bar',
            'X-Image-Meta-Property-spl_update_prop': 'u',
            'X-Glance-Registry-Purge-Props': 'True'
        }
        # NOTE(review): self-update is a no-op (see above).
        auth_headers.update(auth_headers)
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)

        # Returned image entity should reflect the changes
        image = jsonutils.loads(content)

        # 'spl_update_prop' has update permission for spl_role
        # hence the value has changed
        self.assertEqual('u', image['image']['properties']['spl_update_prop'])

        # 'spl_delete_prop' has delete permission for spl_role
        # hence the property has been deleted
        self.assertNotIn('spl_delete_prop', image['image']['properties'])

        # 'spl_create_prop' has create permission for spl_role
        # hence the property has been created
        self.assertEqual('create_bar',
                         image['image']['properties']['spl_create_prop'])

        # Image Deletion should work
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'DELETE',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)

        # This image should be no longer be directly accessible
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:spl_role',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(404, response.status)

    def test_property_protections_special_chars(self):
        # Enable property protection
        self.config(property_protection_file=self.property_file)
        self.init()

        CREATE_HEADERS = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
            'X-Image-Meta-Size': '0',
        }

        # Create an image
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:member',
        }
        auth_headers.update(CREATE_HEADERS)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=auth_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']

        # Verify both admin and unknown role can create properties marked with
        # '@'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        custom_props = {
            'X-Image-Meta-Property-x_all_permitted_admin': '1'
        }
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        image = jsonutils.loads(content)
        self.assertEqual('1',
                         image['image']['properties']['x_all_permitted_admin'])
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:joe_soap',
        }
        custom_props = {
            'X-Image-Meta-Property-x_all_permitted_joe_soap': '1',
            'X-Glance-Registry-Purge-Props': 'False'
        }
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        image = jsonutils.loads(content)
        self.assertEqual(
            '1', image['image']['properties']['x_all_permitted_joe_soap'])

        # Verify both admin and unknown role can read properties marked with
        # '@'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('1', response.get(
            'x-image-meta-property-x_all_permitted_admin'))
        self.assertEqual('1', response.get(
            'x-image-meta-property-x_all_permitted_joe_soap'))
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:joe_soap',
        }
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('1', response.get(
            'x-image-meta-property-x_all_permitted_admin'))
        self.assertEqual('1', response.get(
            'x-image-meta-property-x_all_permitted_joe_soap'))

        # Verify both admin and unknown role can update properties marked with
        # '@'
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        custom_props = {
            'X-Image-Meta-Property-x_all_permitted_admin': '2',
            'X-Glance-Registry-Purge-Props': 'False'
        }
        auth_headers.update(custom_props)
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        image = jsonutils.loads(content)
        self.assertEqual('2',
                         image['image']['properties']['x_all_permitted_admin'])
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:joe_soap',
        }
        custom_props = {
            'X-Image-Meta-Property-x_all_permitted_joe_soap': '2',
            'X-Glance-Registry-Purge-Props': 'False'
        }
        auth_headers.update(custom_props)
        path = "/v1/images/%s" %
image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(200, response.status) image = jsonutils.loads(content) self.assertEqual( '2', image['image']['properties']['x_all_permitted_joe_soap']) # Verify both admin and unknown role can delete properties marked with # '@' auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_all_permitted_joe_soap': '2', 'X-Glance-Registry-Purge-Props': 'True' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(200, response.status) image = jsonutils.loads(content) self.assertNotIn('x_all_permitted_admin', image['image']['properties']) auth_headers = { 'X-Auth-Token': 'user1:tenant1:joe_soap', } custom_props = { 'X-Glance-Registry-Purge-Props': 'True' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(200, response.status) image = jsonutils.loads(content) self.assertNotIn('x_all_permitted_joe_soap', image['image']['properties']) # Verify neither admin nor unknown role can create a property protected # with '!' auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_none_permitted_admin': '1' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) auth_headers = { 'X-Auth-Token': 'user1:tenant1:joe_soap', } custom_props = { 'X-Image-Meta-Property-x_none_permitted_joe_soap': '1' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) # Verify neither admin nor unknown role can read properties marked with # '!' 
auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_none_read': '1' } auth_headers.update(custom_props) auth_headers.update(CREATE_HEADERS) path = "/v1/images" response, content = self.http.request(path, 'POST', headers=auth_headers) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'HEAD', headers=auth_headers) self.assertEqual(200, response.status) self.assertRaises(KeyError, response.get, 'X-Image-Meta-Property-x_none_read') auth_headers = { 'X-Auth-Token': 'user1:tenant1:joe_soap', } path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'HEAD', headers=auth_headers) self.assertEqual(200, response.status) self.assertRaises(KeyError, response.get, 'X-Image-Meta-Property-x_none_read') # Verify neither admin nor unknown role can update properties marked # with '!' 
auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_none_update': '1' } auth_headers.update(custom_props) auth_headers.update(CREATE_HEADERS) path = "/v1/images" response, content = self.http.request(path, 'POST', headers=auth_headers) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_none_update': '2' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) auth_headers = { 'X-Auth-Token': 'user1:tenant1:joe_soap', } custom_props = { 'X-Image-Meta-Property-x_none_update': '2' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) # Verify neither admin nor unknown role can delete properties marked # with '!' 
auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Image-Meta-Property-x_none_delete': '1' } auth_headers.update(custom_props) auth_headers.update(CREATE_HEADERS) path = "/v1/images" response, content = self.http.request(path, 'POST', headers=auth_headers) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] auth_headers = { 'X-Auth-Token': 'user1:tenant1:admin', } custom_props = { 'X-Glance-Registry-Purge-Props': 'True' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) auth_headers = { 'X-Auth-Token': 'user1:tenant1:joe_soap', } custom_props = { 'X-Glance-Registry-Purge-Props': 'True' } auth_headers.update(custom_props) path = "/v1/images/%s" % image_id response, content = self.http.request(path, 'PUT', headers=auth_headers) self.assertEqual(403, response.status) glance-12.0.0/glance/tests/integration/legacy_functional/__init__.py0000664000567000056710000000000012701407047026671 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/legacy_functional/base.py0000664000567000056710000001627112701407047026065 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import atexit import os.path import tempfile import fixtures import glance_store from oslo_config import cfg from oslo_db import options import glance.common.client from glance.common import config from glance.db import migration import glance.db.sqlalchemy.api import glance.registry.client.v1.client from glance import tests as glance_tests from glance.tests import utils as test_utils TESTING_API_PASTE_CONF = """ [pipeline:glance-api] pipeline = versionnegotiation gzip unauthenticated-context rootapp [pipeline:glance-api-caching] pipeline = versionnegotiation gzip unauthenticated-context cache rootapp [pipeline:glance-api-cachemanagement] pipeline = versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [pipeline:glance-api-fakeauth] pipeline = versionnegotiation gzip fakeauth context rootapp [pipeline:glance-api-noauth] pipeline = versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v1: apiv1app /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv1app] paste.app_factory = glance.api.v1.router:API.factory [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ TESTING_REGISTRY_PASTE_CONF = """ 
[pipeline:glance-registry] pipeline = unauthenticated-context registryapp [pipeline:glance-registry-fakeauth] pipeline = fakeauth context registryapp [app:registryapp] paste.app_factory = glance.registry.api.v1:API.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ CONF = cfg.CONF class ApiTest(test_utils.BaseTestCase): def setUp(self): super(ApiTest, self).setUp() self.init() def init(self): self.test_dir = self.useFixture(fixtures.TempDir()).path self._configure_logging() self._configure_policy() self._setup_database() self._setup_stores() self._setup_property_protection() self.glance_registry_app = self._load_paste_app( 'glance-registry', flavor=getattr(self, 'registry_flavor', ''), conf=getattr(self, 'registry_paste_conf', TESTING_REGISTRY_PASTE_CONF), ) self._connect_registry_client() self.glance_api_app = self._load_paste_app( 'glance-api', flavor=getattr(self, 'api_flavor', ''), conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF), ) self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app) def _setup_property_protection(self): self._copy_data_file('property-protections.conf', self.test_dir) self.property_file = os.path.join(self.test_dir, 'property-protections.conf') def _configure_policy(self): policy_file = self._copy_data_file('policy.json', self.test_dir) self.config(policy_file=policy_file, group='oslo_policy') def _configure_logging(self): self.config(default_log_levels=[ 'amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 'eventlet.wsgi.server=DEBUG' ]) def _setup_database(self): sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir options.set_defaults(CONF, connection=sql_connection) glance.db.sqlalchemy.api.clear_db_env() 
glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' if glance_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glance_db_env] test_utils.execute('cp %s %s/tests.sqlite' % (db_location, self.test_dir)) else: migration.db_sync() # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) test_utils.execute('cp %s/tests.sqlite %s' % (self.test_dir, db_location)) os.environ[glance_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glance_db_env]) except Exception: glance_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glance_db_env]) atexit.register(_delete_cached_db) def _setup_stores(self): glance_store.register_opts(CONF) image_dir = os.path.join(self.test_dir, "images") self.config(group='glance_store', filesystem_store_datadir=image_dir) glance_store.create_stores() def _load_paste_app(self, name, flavor, conf): conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name) with open(conf_file_path, 'wb') as conf_file: conf_file.write(conf) conf_file.flush() return config.load_paste_app(name, flavor=flavor, conf_file=conf_file_path) def _connect_registry_client(self): def get_connection_type(self2): def wrapped(*args, **kwargs): return test_utils.HttplibWsgiAdapter(self.glance_registry_app) return wrapped self.stubs.Set(glance.common.client.BaseClient, 'get_connection_type', get_connection_type) def tearDown(self): glance.db.sqlalchemy.api.clear_db_env() super(ApiTest, self).tearDown() glance-12.0.0/glance/tests/integration/__init__.py0000664000567000056710000000000012701407047023203 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/v2/0000775000567000056710000000000012701407204021426 5ustar 
jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/v2/__init__.py0000664000567000056710000000000012701407047023532 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/integration/v2/test_property_quota_violations.py0000664000567000056710000001230212701407047030406 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests.integration.v2 import base CONF = cfg.CONF class TestPropertyQuotaViolations(base.ApiTest): def __init__(self, *args, **kwargs): super(TestPropertyQuotaViolations, self).__init__(*args, **kwargs) self.api_flavor = 'noauth' self.registry_flavor = 'fakeauth' def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': "foo", 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def _get(self, image_id=""): path = ('/v2/images/%s' % image_id).rstrip('/') rsp, content = self.http.request(path, 'GET', headers=self._headers()) self.assertEqual(200, rsp.status) content = jsonutils.loads(content) return content def _create_image(self, body): path = '/v2/images' headers = 
self._headers({'content-type': 'application/json'}) rsp, content = self.http.request(path, 'POST', headers=headers, body=jsonutils.dumps(body)) self.assertEqual(201, rsp.status) return jsonutils.loads(content) def _patch(self, image_id, body, expected_status): path = '/v2/images/%s' % image_id media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) rsp, content = self.http.request(path, 'PATCH', headers=headers, body=jsonutils.dumps(body)) self.assertEqual(expected_status, rsp.status, content) return content def test_property_ops_when_quota_violated(self): # Image list must be empty to begin with image_list = self._get()['images'] self.assertEqual(0, len(image_list)) orig_property_quota = 10 CONF.set_override('image_property_quota', orig_property_quota, enforce_type=True) # Create an image (with deployer-defined properties) req_body = {'name': 'testimg', 'disk_format': 'aki', 'container_format': 'aki'} for i in range(orig_property_quota): req_body['k_%d' % i] = 'v_%d' % i image = self._create_image(req_body) image_id = image['id'] for i in range(orig_property_quota): self.assertEqual('v_%d' % i, image['k_%d' % i]) # Now reduce property quota. 
We should be allowed to modify/delete # existing properties (even if the result still exceeds property quota) # but not add new properties nor replace existing properties with new # properties (as long as we're over the quota) self.config(image_property_quota=2) patch_body = [{'op': 'replace', 'path': '/k_4', 'value': 'v_4.new'}] image = jsonutils.loads(self._patch(image_id, patch_body, 200)) self.assertEqual('v_4.new', image['k_4']) patch_body = [{'op': 'remove', 'path': '/k_7'}] image = jsonutils.loads(self._patch(image_id, patch_body, 200)) self.assertNotIn('k_7', image) patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}] self._patch(image_id, patch_body, 413) image = self._get(image_id) self.assertNotIn('k_100', image) patch_body = [ {'op': 'remove', 'path': '/k_5'}, {'op': 'add', 'path': '/k_100', 'value': 'v_100'}, ] self._patch(image_id, patch_body, 413) image = self._get(image_id) self.assertNotIn('k_100', image) self.assertIn('k_5', image) # temporary violations to property quota should be allowed as long as # it's within one PATCH request and the end result does not violate # quotas. patch_body = [{'op': 'add', 'path': '/k_100', 'value': 'v_100'}, {'op': 'add', 'path': '/k_99', 'value': 'v_99'}] to_rm = ['k_%d' % i for i in range(orig_property_quota) if i != 7] patch_body.extend([{'op': 'remove', 'path': '/%s' % k} for k in to_rm]) image = jsonutils.loads(self._patch(image_id, patch_body, 200)) self.assertEqual('v_99', image['k_99']) self.assertEqual('v_100', image['k_100']) for k in to_rm: self.assertNotIn(k, image) glance-12.0.0/glance/tests/integration/v2/base.py0000664000567000056710000001606612701407047022730 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import os.path import tempfile import fixtures import glance_store from oslo_config import cfg from oslo_db import options import glance.common.client from glance.common import config from glance.db import migration import glance.db.sqlalchemy.api import glance.registry.client.v1.client from glance import tests as glance_tests from glance.tests import utils as test_utils TESTING_API_PASTE_CONF = """ [pipeline:glance-api] pipeline = versionnegotiation gzip unauthenticated-context rootapp [pipeline:glance-api-caching] pipeline = versionnegotiation gzip unauthenticated-context cache rootapp [pipeline:glance-api-cachemanagement] pipeline = versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [pipeline:glance-api-fakeauth] pipeline = versionnegotiation gzip fakeauth context rootapp [pipeline:glance-api-noauth] pipeline = versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v1: apiv1app /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv1app] paste.app_factory = glance.api.v1.router:API.factory [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = 
glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ TESTING_REGISTRY_PASTE_CONF = """ [pipeline:glance-registry] pipeline = unauthenticated-context registryapp [pipeline:glance-registry-fakeauth] pipeline = fakeauth context registryapp [app:registryapp] paste.app_factory = glance.registry.api.v1:API.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ CONF = cfg.CONF class ApiTest(test_utils.BaseTestCase): def setUp(self): super(ApiTest, self).setUp() self.test_dir = self.useFixture(fixtures.TempDir()).path self._configure_logging() self._setup_database() self._setup_stores() self._setup_property_protection() self.glance_registry_app = self._load_paste_app( 'glance-registry', flavor=getattr(self, 'registry_flavor', ''), conf=getattr(self, 'registry_paste_conf', TESTING_REGISTRY_PASTE_CONF), ) self._connect_registry_client() self.glance_api_app = self._load_paste_app( 'glance-api', flavor=getattr(self, 'api_flavor', ''), conf=getattr(self, 'api_paste_conf', TESTING_API_PASTE_CONF), ) self.http = test_utils.Httplib2WsgiAdapter(self.glance_api_app) def _setup_property_protection(self): self._copy_data_file('property-protections.conf', self.test_dir) self.property_file = os.path.join(self.test_dir, 'property-protections.conf') def _configure_logging(self): self.config(default_log_levels=[ 'amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'suds=INFO', 'keystone=INFO', 
'eventlet.wsgi.server=DEBUG' ]) def _setup_database(self): sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir options.set_defaults(CONF, connection=sql_connection, sqlite_db='glance.sqlite') glance.db.sqlalchemy.api.clear_db_env() glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' if glance_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glance_db_env] test_utils.execute('cp %s %s/tests.sqlite' % (db_location, self.test_dir)) else: migration.db_sync() # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) test_utils.execute('cp %s/tests.sqlite %s' % (self.test_dir, db_location)) os.environ[glance_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glance_db_env]) except Exception: glance_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glance_db_env]) atexit.register(_delete_cached_db) def _setup_stores(self): glance_store.register_opts(CONF) image_dir = os.path.join(self.test_dir, "images") self.config(group='glance_store', filesystem_store_datadir=image_dir) glance_store.create_stores() def _load_paste_app(self, name, flavor, conf): conf_file_path = os.path.join(self.test_dir, '%s-paste.ini' % name) with open(conf_file_path, 'wb') as conf_file: conf_file.write(conf) conf_file.flush() return config.load_paste_app(name, flavor=flavor, conf_file=conf_file_path) def _connect_registry_client(self): def get_connection_type(self2): def wrapped(*args, **kwargs): return test_utils.HttplibWsgiAdapter(self.glance_registry_app) return wrapped self.stubs.Set(glance.common.client.BaseClient, 'get_connection_type', get_connection_type) def tearDown(self): glance.db.sqlalchemy.api.clear_db_env() super(ApiTest, self).tearDown() 
glance-12.0.0/glance/tests/integration/v2/test_tasks_api.py0000664000567000056710000004745112701407047025035 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from oslo_serialization import jsonutils as json from glance.api.v2 import tasks from glance.common import timeutils from glance.tests.integration.v2 import base TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df' TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81' TENANT3 = '5a3e60e8-cfa9-4a9e-a90a-62b42cea92b8' TENANT4 = 'c6c87f25-8a94-47ed-8c83-053c25f42df4' def minimal_task_headers(owner='tenant1'): headers = { 'X-Auth-Token': 'user1:%s:admin' % owner, 'Content-Type': 'application/json', } return headers def _new_task_fixture(**kwargs): task_data = { "type": "import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } } task_data.update(kwargs) return task_data class TestTasksApi(base.ApiTest): def __init__(self, *args, **kwargs): super(TestTasksApi, self).__init__(*args, **kwargs) self.api_flavor = 'fakeauth' self.registry_flavor = 'fakeauth' def _wait_on_task_execution(self): """Wait until all the tasks have finished execution and are in state of success or failure. 
""" start = timeutils.utcnow() # wait for maximum of 5 seconds while timeutils.delta_seconds(start, timeutils.utcnow()) < 5: wait = False # Verify that no task is in status of pending or processing path = "/v2/tasks" res, content = self.http.request(path, 'GET', headers=minimal_task_headers()) content_dict = json.loads(content) self.assertEqual(200, res.status) res_tasks = content_dict['tasks'] if len(res_tasks) != 0: for task in res_tasks: if task['status'] in ('pending', 'processing'): wait = True break if wait: # Bug #1541487: we must give time to the server to execute the # task, but the server is run in the same process than the # test. Use eventlet to give the control to the pending server # task. eventlet.sleep(0.05) continue else: break def _post_new_task(self, **kwargs): task_owner = kwargs.get('owner') headers = minimal_task_headers(task_owner) task_data = _new_task_fixture() task_data['input']['import_from'] = "http://example.com" body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request(path, 'POST', headers=headers, body=body_content) self.assertEqual(201, response.status) task = json.loads(content) task_id = task['id'] self.assertIsNotNone(task_id) self.assertEqual(task_owner, task['owner']) self.assertEqual(task_data['type'], task['type']) self.assertEqual(task_data['input'], task['input']) return task, task_data def test_all_task_api(self): # 0. GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) content_dict = json.loads(content) self.assertEqual(200, response.status) self.assertFalse(content_dict['tasks']) # 1. GET /tasks/{task_id} # Verify non-existent task task_id = 'NON_EXISTENT_TASK' path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(404, response.status) # 2. 
POST /tasks # Create a new task task_owner = 'tenant1' data, req_input = self._post_new_task(owner=task_owner) # 3. GET /tasks/{task_id} # Get an existing task task_id = data['id'] path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) # NOTE(sabari): wait for all task executions to finish before checking # task status. self._wait_on_task_execution() # 4. GET /tasks # Get all tasks (not deleted) path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) self.assertIsNotNone(content) data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(1, len(data['tasks'])) # NOTE(venkatesh) find a way to get expected_keys from tasks controller expected_keys = set(['id', 'expires_at', 'type', 'owner', 'status', 'created_at', 'updated_at', 'self', 'schema']) task = data['tasks'][0] self.assertEqual(expected_keys, set(task.keys())) self.assertEqual(req_input['type'], task['type']) self.assertEqual(task_owner, task['owner']) self.assertEqual('success', task['status']) self.assertIsNotNone(task['created_at']) self.assertIsNotNone(task['updated_at']) def test_task_schema_api(self): # 0. GET /schemas/task # Verify schema for task path = "/v2/schemas/task" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) schema = tasks.get_task_schema() expected_schema = schema.minimal() data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(expected_schema, data) # 1. 
GET /schemas/tasks # Verify schema for tasks path = "/v2/schemas/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) schema = tasks.get_collection_schema() expected_schema = schema.minimal() data = json.loads(content) self.assertIsNotNone(data) self.assertEqual(expected_schema, data) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_create_new_task(self): # 0. POST /tasks # Create a new task with valid input and type task_data = _new_task_fixture() task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(201, response.status) data = json.loads(content) task_id = data['id'] self.assertIsNotNone(task_id) self.assertEqual(task_owner, data['owner']) self.assertEqual(task_data['type'], data['type']) self.assertEqual(task_data['input'], data['input']) # 1. POST /tasks # Create a new task with invalid type # Expect BadRequest(400) Error as response task_data = _new_task_fixture(type='invalid') task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(400, response.status) # 1. 
POST /tasks # Create a new task with invalid input for type 'import' # Expect BadRequest(400) Error as response task_data = _new_task_fixture(task_input='{something: invalid}') task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(400, response.status) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_tasks_with_filter(self): # 0. GET /v2/tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) content_dict = json.loads(content) self.assertFalse(content_dict['tasks']) task_ids = [] # 1. Make 2 POST requests on /tasks with various attributes task_owner = TENANT1 data, req_input1 = self._post_new_task(owner=task_owner) task_ids.append(data['id']) task_owner = TENANT2 data, req_input2 = self._post_new_task(owner=task_owner) task_ids.append(data['id']) # 2. GET /tasks # Verify two import tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) content_dict = json.loads(content) self.assertEqual(2, len(content_dict['tasks'])) # 3. GET /tasks with owner filter # Verify correct task returned with owner params = "owner=%s" % TENANT1 path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) content_dict = json.loads(content) self.assertEqual(1, len(content_dict['tasks'])) self.assertEqual(TENANT1, content_dict['tasks'][0]['owner']) # Check the same for different owner. 
params = "owner=%s" % TENANT2 path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) content_dict = json.loads(content) self.assertEqual(1, len(content_dict['tasks'])) self.assertEqual(TENANT2, content_dict['tasks'][0]['owner']) # 4. GET /tasks with type filter # Verify correct task returned with type params = "type=import" path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) content_dict = json.loads(content) self.assertEqual(2, len(content_dict['tasks'])) actual_task_ids = [task['id'] for task in content_dict['tasks']] self.assertEqual(set(task_ids), set(actual_task_ids)) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_limited_tasks(self): """ Ensure marker and limit query params work """ # 0. GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) tasks = json.loads(content) self.assertFalse(tasks['tasks']) task_ids = [] # 1. POST /tasks with three tasks with various attributes task, _ = self._post_new_task(owner=TENANT1) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT2) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT3) task_ids.append(task['id']) # 2. GET /tasks # Verify 3 tasks are returned path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) tasks = json.loads(content)['tasks'] self.assertEqual(3, len(tasks)) # 3. 
GET /tasks with limit of 2 # Verify only two tasks were returned params = "limit=2" path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(tasks[0]['id'], actual_tasks[0]['id']) self.assertEqual(tasks[1]['id'], actual_tasks[1]['id']) # 4. GET /tasks with marker # Verify only two tasks were returned params = "marker=%s" % tasks[0]['id'] path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(tasks[1]['id'], actual_tasks[0]['id']) self.assertEqual(tasks[2]['id'], actual_tasks[1]['id']) # 5. GET /tasks with marker and limit # Verify only one task was returned with the correct id params = "limit=1&marker=%s" % tasks[1]['id'] path = "/v2/tasks?%s" % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(1, len(actual_tasks)) self.assertEqual(tasks[2]['id'], actual_tasks[0]['id']) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_ordered_tasks(self): # 0. GET /tasks # Verify no tasks path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) tasks = json.loads(content) self.assertFalse(tasks['tasks']) task_ids = [] # 1. 
POST /tasks with three tasks with various attributes task, _ = self._post_new_task(owner=TENANT1) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT2) task_ids.append(task['id']) task, _ = self._post_new_task(owner=TENANT3) task_ids.append(task['id']) # 2. GET /tasks with no query params # Verify three tasks sorted by created_at desc # 2. GET /tasks # Verify 3 tasks are returned path = "/v2/tasks" response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(3, len(actual_tasks)) self.assertEqual(task_ids[2], actual_tasks[0]['id']) self.assertEqual(task_ids[1], actual_tasks[1]['id']) self.assertEqual(task_ids[0], actual_tasks[2]['id']) # 3. GET /tasks sorted by owner asc params = 'sort_key=owner&sort_dir=asc' path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) expected_task_owners = [TENANT1, TENANT2, TENANT3] expected_task_owners.sort() actual_tasks = json.loads(content)['tasks'] self.assertEqual(3, len(actual_tasks)) self.assertEqual(expected_task_owners, [t['owner'] for t in actual_tasks]) # 4. GET /tasks sorted by owner desc with a marker params = 'sort_key=owner&sort_dir=desc&marker=%s' % task_ids[0] path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(2, len(actual_tasks)) self.assertEqual(task_ids[2], actual_tasks[0]['id']) self.assertEqual(task_ids[1], actual_tasks[1]['id']) self.assertEqual(TENANT3, actual_tasks[0]['owner']) self.assertEqual(TENANT2, actual_tasks[1]['owner']) # 5. 
GET /tasks sorted by owner asc with a marker params = 'sort_key=owner&sort_dir=asc&marker=%s' % task_ids[0] path = '/v2/tasks?%s' % params response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) actual_tasks = json.loads(content)['tasks'] self.assertEqual(0, len(actual_tasks)) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() def test_delete_task(self): # 0. POST /tasks # Create a new task with valid input and type task_data = _new_task_fixture() task_owner = 'tenant1' body_content = json.dumps(task_data) path = "/v2/tasks" response, content = self.http.request( path, 'POST', headers=minimal_task_headers(task_owner), body=body_content) self.assertEqual(201, response.status) data = json.loads(content) task_id = data['id'] # 1. DELETE on /tasks/{task_id} # Attempt to delete a task path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'DELETE', headers=minimal_task_headers()) self.assertEqual(405, response.status) self.assertEqual('GET', response.webob_resp.headers.get('Allow')) self.assertEqual(('GET',), response.webob_resp.allow) self.assertEqual(('GET',), response.allow) # 2. 
GET /tasks/{task_id} # Ensure that methods mentioned in the Allow header work path = "/v2/tasks/%s" % task_id response, content = self.http.request(path, 'GET', headers=minimal_task_headers()) self.assertEqual(200, response.status) self.assertIsNotNone(content) # NOTE(nikhil): wait for all task executions to finish before exiting # else there is a risk of running into deadlock self._wait_on_task_execution() glance-12.0.0/glance/tests/functional/0000775000567000056710000000000012701407204020716 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/glare/0000775000567000056710000000000012701407204022010 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/glare/__init__.py0000664000567000056710000000000012701407047024114 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/glare/test_glare.py0000664000567000056710000024150412701407047024526 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest import uuid import mock from oslo_serialization import jsonutils import pkg_resources import requests from glance.api.glare.v0_1 import glare from glance.api.glare.v0_1 import router from glance.common.glare import definitions from glance.common.glare import loader from glance.common import wsgi from glance.tests import functional class Artifact(definitions.ArtifactType): __type_name__ = "WithProps" prop1 = definitions.String() prop2 = definitions.Integer() prop_list = definitions.Array(item_type=definitions.Integer()) tuple_prop = definitions.Array(item_type=[definitions.Integer(), definitions.Boolean()]) dict_prop = definitions.Dict(properties={ "foo": definitions.String(), "bar_list": definitions.Array(definitions.Integer())}) dict_prop_strval = definitions.Dict(properties=definitions.String()) depends_on = definitions.ArtifactReference() depends_on_list = definitions.ArtifactReferenceList() class ArtifactNoProps(definitions.ArtifactType): __type_name__ = "NoProp" class ArtifactNoProps1(definitions.ArtifactType): __type_name__ = "NoProp" __type_version__ = "0.5" class ArtifactWithBlob(definitions.ArtifactType): __type_name__ = "WithBlob" blob1 = definitions.BinaryObject() blob_list = definitions.BinaryObjectList() def _create_resource(): plugins = None mock_this = 'stevedore.extension.ExtensionManager._find_entry_points' with mock.patch(mock_this) as fep: path = 'glance.tests.functional.glare.test_glare' fep.return_value = [ pkg_resources.EntryPoint.parse('WithProps=%s:Artifact' % path), pkg_resources.EntryPoint.parse( 'NoProp=%s:ArtifactNoProps' % path), pkg_resources.EntryPoint.parse( 'NoProp=%s:ArtifactNoProps1' % path), pkg_resources.EntryPoint.parse( 'WithBlob=%s:ArtifactWithBlob' % path) ] plugins = loader.ArtifactsPluginLoader('glance.artifacts.types') deserializer = glare.RequestDeserializer(plugins=plugins) serializer = glare.ResponseSerializer() controller = glare.ArtifactsController(plugins=plugins) return wsgi.Resource(controller, 
deserializer, serializer) class TestRouter(router.API): def _get_artifacts_resource(self): return _create_resource() class TestArtifacts(functional.FunctionalTest): users = { 'user1': { 'id': str(uuid.uuid4()), 'tenant_id': str(uuid.uuid4()), 'token': str(uuid.uuid4()), 'role': 'member' }, 'user2': { 'id': str(uuid.uuid4()), 'tenant_id': str(uuid.uuid4()), 'token': str(uuid.uuid4()), 'role': 'member' }, 'admin': { 'id': str(uuid.uuid4()), 'tenant_id': str(uuid.uuid4()), 'token': str(uuid.uuid4()), 'role': 'admin' } } def setUp(self): super(TestArtifacts, self).setUp() self._set_user('user1') self.api_server.server_name = 'glare' self.api_server.server_module = 'glance.cmd.glare' self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def tearDown(self): self.stop_servers() self._reset_database(self.api_server.sql_connection) super(TestArtifacts, self).tearDown() def _url(self, path): return 'http://127.0.0.1:%d/v0.1/artifacts%s' % (self.api_port, path) def _set_user(self, username): if username not in self.users: raise KeyError self.current_user = username def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': self.users[self.current_user]['token'], 'X-User-Id': self.users[self.current_user]['id'], 'X-Tenant-Id': self.users[self.current_user]['tenant_id'], 'X-Roles': self.users[self.current_user]['role'], } base_headers.update(custom_headers or {}) return base_headers def start_servers(self, **kwargs): # noqa new_paste_conf_base = """[pipeline:glare-api] pipeline = versionnegotiation unauthenticated-context rootapp [pipeline:glare-api-fakeauth] pipeline = versionnegotiation fakeauth context rootapp [pipeline:glare-api-noauth] pipeline = versionnegotiation context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v0.1: glareapi [app:apiversions] paste.app_factory = glance.api.glare.versions:create_resource [app:glareapi] 
paste.app_factory = glance.tests.functional.glare.test_glare:TestRouter.factory [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation: GlareVersionNegotiationFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ self.cleanup() self.api_server.paste_conf_base = new_paste_conf_base super(TestArtifacts, self).start_servers(**kwargs) def _create_artifact(self, type_name, type_version='1.0', data=None, status=201): # create an artifact first artifact_data = data or {'name': 'artifact-1', 'version': '12'} return self._check_artifact_post('/%s/v%s/drafts' % (type_name, type_version), artifact_data, status=status) def _check_artifact_method(self, method, url, data=None, status=200, headers=None): if not headers: headers = self._headers() else: headers = self._headers(headers) headers.setdefault("Content-Type", "application/json") if 'application/json' in headers['Content-Type']: data = jsonutils.dumps(data) response = getattr(requests, method)(self._url(url), headers=headers, data=data) self.assertEqual(status, response.status_code) if status >= 400: return response.text if "application/json" in response.headers["content-type"]: return jsonutils.loads(response.text) return response.text def _check_artifact_post(self, url, data, status=201, headers=None): if headers is None: headers = {'Content-Type': 'application/json'} return self._check_artifact_method("post", url, data, status=status, headers=headers) def _check_artifact_get(self, url, status=200): return self._check_artifact_method("get", url, status=status) def _check_artifact_delete(self, url, status=204): response = requests.delete(self._url(url), headers=self._headers()) self.assertEqual(status, 
response.status_code) return response.text def _check_artifact_patch(self, url, data, status=200, headers={'Content-Type': 'application/json'}): return self._check_artifact_method("patch", url, data, status=status, headers=headers) def _check_artifact_put(self, url, data, status=200, headers={'Content-Type': 'application/json'}): return self._check_artifact_method("put", url, data, status=status, headers=headers) def test_list_any_artifacts(self): """Returns information about all draft artifacts with given endpoint""" self._create_artifact('noprop') artifacts = self._check_artifact_get('/noprop/drafts')["artifacts"] self.assertEqual(1, len(artifacts)) def test_list_last_version(self): """/artifacts/endpoint == /artifacts/endpoint/all-versions""" self._create_artifact('noprop') artifacts = self._check_artifact_get('/noprop/drafts')["artifacts"] self.assertEqual(1, len(artifacts)) # the same result can be achieved if asked for artifact with # type_version=last version artifacts_precise = self._check_artifact_get( '/noprop/v1.0/drafts')["artifacts"] self.assertEqual(artifacts, artifacts_precise) def test_list_artifacts_by_state(self): """Returns last version of artifacts with given state""" self._create_artifact('noprop') creating_state = self._check_artifact_get( '/noprop/drafts')["artifacts"] self.assertEqual(1, len(creating_state)) # no active [/type_name/active == /type_name] active_state = self._check_artifact_get('/noprop')["artifacts"] self.assertEqual(0, len(active_state)) def test_list_artifacts_with_version(self): """Supplying precise artifact version does not break anything""" self._create_artifact('noprop') list_creating = self._check_artifact_get( '/noprop/v1.0/drafts')["artifacts"] self.assertEqual(1, len(list_creating)) bad_version = self._check_artifact_get('/noprop/v1.0bad', status=400) self.assertIn("Invalid version string: u'1.0bad'", bad_version) def test_list_artifacts_with_pagination(self): """List artifacts with pagination""" # create artifacts 
art1 = {'name': 'artifact-1', 'version': '12'} art2 = {'name': 'artifact-2', 'version': '12'} self._create_artifact('noprop', data=art1) self._create_artifact('noprop', data=art2) # sorting is desc by default first_page = self._check_artifact_get( '/noprop/drafts?limit=1&sort=name') # check the first artifacts has returned correctly self.assertEqual(1, len(first_page["artifacts"])) self.assertEqual("artifact-2", first_page["artifacts"][0]["name"]) self.assertIn("next", first_page) # check the second page second_page_url = first_page["next"].split("artifacts", 1)[1] second_page = self._check_artifact_get(second_page_url) self.assertIn("next", second_page) self.assertEqual(1, len(second_page["artifacts"])) self.assertEqual("artifact-1", second_page["artifacts"][0]["name"]) # check that the latest item is empty last_page_url = second_page["next"].split("artifacts", 1)[1] last_page = self._check_artifact_get(last_page_url) self.assertEqual(0, len(last_page["artifacts"])) self.assertNotIn("next", last_page) def test_get_artifact_by_id_any_version(self): data = self._create_artifact('noprop') artifact_id = data['id'] artifacts = self._check_artifact_get( '/noprop/%s' % artifact_id) self.assertEqual(artifact_id, artifacts['id']) def test_list_artifact_no_such_version(self): """Version filtering should be applied for existing plugins. 
An attempt to retrieve an artifact out of existing plugin but with a wrong version should result in 400 BadRequest 'No such plugin has been loaded' """ msg = self._check_artifact_get('/noprop/v0.0.9', 400) self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded", msg) def test_get_artifact_by_id(self): data = self._create_artifact('noprop') artifact_id = data['id'] artifacts = self._check_artifact_get( '/noprop/%s' % artifact_id) self.assertEqual(artifact_id, artifacts['id']) # the same result can be achieved if asked for artifact with # type_version=last version artifacts_precise = self._check_artifact_get( '/noprop/v1.0/%s' % artifact_id) self.assertEqual(artifacts, artifacts_precise) def test_get_artifact_basic_show_level(self): no_prop_art = self._create_artifact('noprop') art = self._create_artifact( 'withprops', data={"name": "name", "version": "42", "depends_on": no_prop_art['id']}) self.assertEqual(no_prop_art['id'], art['depends_on']['id']) self.assertEqual(no_prop_art['name'], art['depends_on']['name']) artifact_id = art['id'] artifact = self._check_artifact_get( '/withprops/%s?show_level=basic' % artifact_id) self.assertEqual(artifact_id, artifact['id']) self.assertIsNone(artifact['depends_on']) def test_get_artifact_none_show_level(self): """Create an artifact (with two deployer-defined properties)""" artifact_data = {'name': 'artifact-1', 'version': '12', 'tags': ['gagaga', 'sesese'], 'prop1': 'Arthur Dent', 'prop2': 42} art = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) expected_artifact = { 'state': 'creating', 'name': 'artifact-1', 'version': '12.0.0', 'tags': ['gagaga', 'sesese'], 'visibility': 'private', 'type_name': 'WithProps', 'type_version': '1.0', 'prop1': 'Arthur Dent', 'prop2': 42 } for key, value in expected_artifact.items(): self.assertEqual(art[key], value, key) artifact_id = art['id'] artifact = self._check_artifact_get( '/withprops/%s?show_level=none' % artifact_id) self.assertEqual(artifact_id, 
artifact['id']) self.assertIsNone(artifact['prop1']) self.assertIsNone(artifact['prop2']) def test_get_artifact_invalid_show_level(self): no_prop_art = self._create_artifact('noprop') art = self._create_artifact( 'withprops', data={"name": "name", "version": "42", "depends_on": no_prop_art['id']}) self.assertEqual(no_prop_art['id'], art['depends_on']['id']) self.assertEqual(no_prop_art['name'], art['depends_on']['name']) artifact_id = art['id'] # 'hui' is invalid show level self._check_artifact_get( '/noprop/%s?show_level=yoba' % artifact_id, status=400) def test_get_artifact_no_such_id(self): msg = self._check_artifact_get( '/noprop/%s' % str(uuid.uuid4()), status=404) self.assertIn('No artifact found with ID', msg) def test_get_artifact_present_id_wrong_type(self): artifact_data = {'name': 'artifact-1', 'version': '12', 'prop1': '12', 'prop2': 12} art1 = self._create_artifact('withprops', data=artifact_data) art2 = self._create_artifact('noprop') # ok id and type_name but bad type_version should result in 404 self._check_artifact_get('/noprop/v0.5/%s' % str(art2['id']), status=404) # try to access art2 by supplying art1.type and art2.id self._check_artifact_get('/withprops/%s' % str(art2['id']), status=404) self._check_artifact_get('/noprop/%s' % str(art1['id']), status=404) def test_delete_artifact(self): artifact_data = {'name': 'artifact-1', 'version': '12', 'prop1': '12', 'prop2': 12} art1 = self._create_artifact('withprops', data=artifact_data) self._check_artifact_delete('/withprops/v1.0/%s' % art1['id']) art1_deleted = self._check_artifact_get('/withprops/%s' % art1['id'], status=404) self.assertIn('No artifact found with ID', art1_deleted) def test_delete_artifact_no_such_id(self): self._check_artifact_delete('/noprop/v1/%s' % str(uuid.uuid4()), status=404) @unittest.skip("Test is unstable") def test_delete_artifact_with_dependency(self): # make sure that artifact can't be deleted if it has some dependencies # still not deleted art = 
self._create_artifact('withprops') no_prop_art = self._create_artifact('noprop') art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'value': no_prop_art['id'], 'op': 'replace', 'path': '/depends_on'}, {'value': [no_prop_art['id']], 'op': 'add', 'path': '/depends_on_list'}]) self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id']) self.assertEqual(1, len(art_updated['depends_on_list'])) # try to delete an artifact prior to its dependency res = self._check_artifact_delete('/withprops/v1/%s' % art['id'], status=400) self.assertIn( "Dependency property 'depends_on' has to be deleted first", res) # delete a dependency art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'op': 'remove', 'path': '/depends_on'}]) # try to delete prior to deleting artifact_list dependencies res = self._check_artifact_delete('/withprops/v1/%s' % art['id'], status=400) self.assertIn( "Dependency property 'depends_on_list' has to be deleted first", res) art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'op': 'remove', 'path': '/depends_on_list'}]) # delete dependency list self._check_artifact_delete('/withprops/v1/%s' % art['id']) def test_delete_artifact_with_blob(self): # Upload some data to an artifact art = self._create_artifact('withblob') headers = self._headers({'Content-Type': 'application/octet-stream'}) self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], headers=headers, data='ZZZZZ', status=200) self._check_artifact_delete('/withblob/v1/%s' % art['id']) def test_update_nonexistent_property_by_replace_op(self): art = self._create_artifact('withprops', data={'name': 'some art', 'version': '4.2'}) data = [{'op': 'replace', 'value': 'some value', 'path': '/nonexistent_property'}] result = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn('400 Bad Request', result) self.assertIn('Artifact has no property nonexistent_property', 
result) def test_update_nonexistent_property_by_remove_op(self): art = self._create_artifact('withprops', data={'name': 'some art', 'version': '4.2'}) data = [{'op': 'replace', 'value': 'some value', 'path': '/nonexistent_property'}] result = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn('400 Bad Request', result) self.assertIn('Artifact has no property nonexistent_property', result) def test_update_nonexistent_property_by_add_op(self): art = self._create_artifact('withprops', data={'name': 'some art', 'version': '4.2'}) data = [{'op': 'replace', 'value': 'some value', 'path': '/nonexistent_property'}] result = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn('400 Bad Request', result) self.assertIn('Artifact has no property nonexistent_property', result) def test_update_array_property_by_replace_op(self): art = self._create_artifact('withprops', data={'name': 'some art', 'version': '4.2'}) self.assertEqual('some art', art['name']) data = [{'op': 'replace', 'value': [1, 2, 3], 'path': '/prop_list'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([1, 2, 3], art_updated['prop_list']) # now try to change first element of the list data_change_first = [{'op': 'replace', 'value': 42, 'path': '/prop_list/1'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data_change_first) self.assertEqual([1, 42, 3], art_updated['prop_list']) # replace last element data_change_last = [{'op': 'replace', 'value': 24, 'path': '/prop_list/-'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data_change_last) self.assertEqual([1, 42, 24], art_updated['prop_list']) def test_update_dict_property_by_replace_op(self): art = self._create_artifact( 'withprops', data={'name': 'some art', 'version': '4.2', 'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}}) self.assertEqual({'foo': 
"Fenchurch", 'bar_list': [42, 42]}, art['dict_prop']) data = [{'op': 'replace', 'value': 24, 'path': '/dict_prop/bar_list/0'}, {'op': 'replace', 'value': 'cello lesson', 'path': '/dict_prop/foo'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual({'foo': 'cello lesson', 'bar_list': [24, 42]}, art_updated['dict_prop']) def test_update_empty_dict_property_by_replace_op(self): art = self._create_artifact('withprops') self.assertIsNone(art['dict_prop']) data = [{'op': 'replace', 'value': "don't panic", 'path': '/dict_prop/foo'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn("The provided path 'dict_prop/foo' is invalid", art_updated) def test_update_empty_dict_property_by_remove_op(self): art = self._create_artifact('withprops') self.assertIsNone(art['dict_prop']) data = [{'op': 'remove', 'path': '/dict_prop/bar_list'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn("The provided path 'dict_prop/bar_list' is invalid", art_updated) def test_update_dict_property_by_remove_op(self): art = self._create_artifact( 'withprops', data={'name': 'some art', 'version': '4.2', 'dict_prop': {'foo': "Fenchurch", 'bar_list': [42, 42]}}) self.assertEqual({'foo': 'Fenchurch', 'bar_list': [42, 42]}, art['dict_prop']) data = [{'op': 'remove', 'path': '/dict_prop/foo'}, {'op': 'remove', 'path': '/dict_prop/bar_list/1'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual({'bar_list': [42]}, art_updated['dict_prop']) # now delete the whole dict data = [{'op': 'remove', 'path': '/dict_prop'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertIsNone(art_updated['dict_prop']) @unittest.skip("Skipping due to a know bug") def test_update_dict_property_change_values(self): art = self._create_artifact( 'withprops', data={'name': 
'some art', 'version': '4.2', 'dict_prop_strval': {'foo': 'Fenchurch', 'bar': 'no value'}}) self.assertEqual({'foo': 'Fenchurch', 'bar': 'no value'}, art['dict_prop_strval']) new_data = [{'op': 'replace', 'path': '/dict_prop_strval', 'value': {'new-foo': 'Arthur Dent'}}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=new_data) self.assertEqual({'new-foo': 'Arthur Dent'}, art_updated['dict_prop_strval']) def test_update_array_property_by_remove_op(self): art = self._create_artifact( 'withprops', data={'name': 'some art', 'version': '4.2', 'prop_list': [1, 2, 3]}) self.assertEqual([1, 2, 3], art['prop_list']) data = [{'op': 'remove', 'path': '/prop_list/0'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([2, 3], art_updated['prop_list']) # remove last element data = [{'op': 'remove', 'path': '/prop_list/-'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([2], art_updated['prop_list']) # now delete the whole array data = [{'op': 'remove', 'path': '/prop_list'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertIsNone(art_updated['prop_list']) def test_update_array_property_by_add_op(self): art = self._create_artifact( 'withprops', data={'name': 'some art', 'version': '4.2'}) self.assertIsNone(art['prop_list']) data = [{'op': 'add', 'path': '/prop_list', 'value': [2, 12, 0, 6]}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([2, 12, 0, 6], art_updated['prop_list']) data = [{'op': 'add', 'path': '/prop_list/2', 'value': 85}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([2, 12, 85, 0, 6], art_updated['prop_list']) # add where path='/array/-' means append to the end data = [{'op': 'add', 'path': '/prop_list/-', 'value': 7}] art_updated = self._check_artifact_patch('/withprops/v1/%s' 
% art['id'], data=data) self.assertEqual([2, 12, 85, 0, 6, 7], art_updated['prop_list']) # an attempt to add an element to nonexistent position should # result in 400 self.assertEqual(6, len(art_updated['prop_list'])) bad_index_data = [{'op': 'add', 'path': '/prop_list/11', 'value': 42}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=bad_index_data, status=400) self.assertIn("The provided path 'prop_list/11' is invalid", art_updated) def test_update_dict_property_by_add_op(self): art = self._create_artifact("withprops") self.assertIsNone(art['dict_prop']) data = [{'op': 'add', 'path': '/dict_prop/foo', 'value': "some value"}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual({"foo": "some value"}, art_updated['dict_prop']) def test_update_empty_array_property_by_add_op(self): """Test jsonpatch add. According to RFC 6902: * if the array is empty, '/array/0' is a valid path """ create_data = {'name': 'new artifact', 'version': '4.2'} art = self._create_artifact('withprops', data=create_data) self.assertIsNone(art['prop_list']) data = [{'op': 'add', 'path': '/prop_list/0', 'value': 3}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([3], art_updated['prop_list']) def test_update_tuple_property_by_index(self): art = self._create_artifact( 'withprops', data={'name': 'some art', 'version': '4.2', 'tuple_prop': [1, False]}) self.assertEqual([1, False], art['tuple_prop']) data = [{'op': 'replace', 'value': True, 'path': '/tuple_prop/1'}, {'op': 'replace', 'value': 2, 'path': '/tuple_prop/0'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertEqual([2, True], art_updated['tuple_prop']) def test_update_artifact(self): art = self._create_artifact('noprop') self.assertEqual('artifact-1', art['name']) art_updated = self._check_artifact_patch( '/noprop/v1/%s' % art['id'], data=[{'op': 'replace', 
'value': '0.0.9', 'path': '/version'}]) self.assertEqual('0.0.9', art_updated['version']) def test_update_artifact_properties(self): art = self._create_artifact('withprops') for prop in ['prop1', 'prop2']: self.assertIsNone(art[prop]) data = [{'op': 'replace', 'value': 'some value', 'path': '/prop1'}] art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=data) self.assertEqual('some value', art_updated['prop1']) def test_update_remove_non_existent_artifact_properties(self): art = self._create_artifact('withprops') for prop in ['prop1', 'prop2']: self.assertIsNone(art[prop]) data = [{'op': 'remove', 'value': 'some value', 'path': '/non-existent-path/and-another'}] art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn('Artifact has no property', art_updated) def test_update_replace_non_existent_artifact_properties(self): art = self._create_artifact('withprops') for prop in ['prop1', 'prop2']: self.assertIsNone(art[prop]) data = [{'op': 'replace', 'value': 'some value', 'path': '/non-existent-path/and-another'}] art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=data, status=400) self.assertIn('Artifact has no property', art_updated) def test_update_artifact_remove_property(self): artifact_data = {'name': 'artifact-1', 'version': '12', 'tags': ['gagaga', 'sesese'], 'prop1': 'Arthur Dent', 'prop2': 42} art = self._create_artifact('withprops', data=artifact_data) data = [{'op': 'remove', 'path': '/prop1'}] art_updated = self._check_artifact_patch('/withprops/v1/%s' % art['id'], data=data) self.assertIsNone(art_updated['prop1']) self.assertEqual(42, art_updated['prop2']) def test_update_wrong_property_type(self): art = self._create_artifact('withprops') for prop in ['prop2', 'prop2']: self.assertIsNone(art[prop]) data = [{'op': 'replace', 'value': 123, 'path': '/prop1'}] art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=data, 
status=400) self.assertIn("Property 'prop1' may not have value '123'", art_updated) def test_update_multiple_properties(self): with_prop_art = self._create_artifact('withprops') data = [{'op': 'replace', 'path': '/prop1', 'value': 'some value'}, {'op': 'replace', 'path': '/prop2', 'value': 42}] updated = self._check_artifact_patch( '/withprops/v1/%s' % with_prop_art['id'], data=data) self.assertEqual('some value', updated['prop1']) self.assertEqual(42, updated['prop2']) def test_create_artifact_with_dependency(self): no_prop_art = self._create_artifact('noprop') art = self._create_artifact( 'withprops', data={"name": "name", "version": "42", "depends_on": no_prop_art['id']}) self.assertEqual(no_prop_art['id'], art['depends_on']['id']) self.assertEqual(no_prop_art['name'], art['depends_on']['name']) def test_create_artifact_dependency_list(self): no_prop_art1 = self._create_artifact('noprop') no_prop_art2 = self._create_artifact('noprop') art = self._create_artifact( 'withprops', data={"name": "name", "version": "42", "depends_on_list": [no_prop_art1['id'], no_prop_art2['id']]}) self.assertEqual(2, len(art['depends_on_list'])) self.assertEqual([no_prop_art1['id'], no_prop_art2['id']], map(lambda x: x['id'], art['depends_on_list'])) def test_create_dependency_list_same_id(self): no_prop_art = self._create_artifact('noprop') res = self._create_artifact( 'withprops', data={"name": "name", "version": "42", "depends_on_list": [no_prop_art['id'], no_prop_art['id']]}, status=400) self.assertIn("Items have to be unique", res) def test_create_artifact_bad_dependency_format(self): """Invalid dependencies creation. 
Dependencies should be passed: * as a list of ids if param is an ArtifactReferenceList * as an id if param is an ArtifactReference """ no_prop_art = self._create_artifact('noprop') art = self._check_artifact_post( '/withprops/v1/drafts', {"name": "name", "version": "42", "depends_on": [no_prop_art['id']]}, status=400) self.assertIn('Not a valid value type', art) art = self._check_artifact_post( '/withprops/v1.0/drafts', {"name": "name", "version": "42", "depends_on_list": no_prop_art['id']}, status=400) self.assertIn('object is not iterable', art) def test_update_dependency(self): no_prop_art = self._create_artifact('noprop') no_prop_art1 = self._create_artifact('noprop') with_prop_art = self._create_artifact('withprops') data = [{'op': 'replace', 'path': '/depends_on', 'value': no_prop_art['id']}] updated = self._check_artifact_patch( '/withprops/v1/%s' % with_prop_art['id'], data=data) self.assertEqual(no_prop_art['id'], updated['depends_on']['id']) self.assertEqual(no_prop_art['name'], updated['depends_on']['name']) data = [{'op': 'replace', 'path': '/depends_on', 'value': no_prop_art1['id']}] # update again and make sure it changes updated = self._check_artifact_patch( '/withprops/v1/%s' % with_prop_art['id'], data=data) self.assertEqual(no_prop_art1['id'], updated['depends_on']['id']) self.assertEqual(no_prop_art1['name'], updated['depends_on']['name']) def test_update_dependency_circular_reference(self): with_prop_art = self._create_artifact('withprops') data = [{'op': 'replace', 'path': '/depends_on', 'value': [with_prop_art['id']]}] not_updated = self._check_artifact_patch( '/withprops/v1/%s' % with_prop_art['id'], data=data, status=400) self.assertIn('Artifact with a circular dependency can not be created', not_updated) def test_publish_artifact(self): art = self._create_artifact('withprops') # now create dependency no_prop_art = self._create_artifact('noprop') art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'value': 
no_prop_art['id'], 'op': 'replace', 'path': '/depends_on'}]) self.assertNotEqual(0, len(art_updated['depends_on'])) # artifact can't be published if any dependency is in non-active state res = self._check_artifact_post( '/withprops/v1/%s/publish' % art['id'], {}, status=400) self.assertIn("Not all dependencies are in 'active' state", res) # after you publish the dependency -> artifact can be published dep_published = self._check_artifact_post( '/noprop/v1/%s/publish' % no_prop_art['id'], {}, status=200) self.assertEqual('active', dep_published['state']) art_published = self._check_artifact_post( '/withprops/v1.0/%s/publish' % art['id'], {}, status=200) self.assertEqual('active', art_published['state']) def test_no_mutable_change_in_published_state(self): art = self._create_artifact('withprops') no_prop_art = self._create_artifact('noprop') no_prop_other = self._create_artifact('noprop') art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'value': no_prop_art['id'], 'op': 'replace', 'path': '/depends_on'}]) self.assertEqual(no_prop_art['id'], art_updated['depends_on']['id']) # now change dependency to some other artifact art_updated = self._check_artifact_patch( '/withprops/v1/%s' % art['id'], data=[{'value': no_prop_other['id'], 'op': 'replace', 'path': '/depends_on'}]) self.assertEqual(no_prop_other['id'], art_updated['depends_on']['id']) # publish dependency dep_published = self._check_artifact_post( '/noprop/v1/%s/publish' % no_prop_other['id'], {}, status=200) self.assertEqual('active', dep_published['state']) # publish artifact art_published = self._check_artifact_post( '/withprops/v1.0/%s/publish' % art['id'], {}, status=200) self.assertEqual('active', art_published['state']) # try to change dependency, should fail as already published res = self._check_artifact_patch( '/withprops/v1/%s' % art_published['id'], data=[{'op': 'remove', 'path': '/depends_on'}], status=400) self.assertIn('Attempt to set value of immutable property', 
res) def test_create_artifact_empty_body(self): self._check_artifact_post('/noprop/v1.0/drafts', {}, 400) def test_create_artifact_insufficient_arguments(self): self._check_artifact_post('/noprop/v1.0/drafts', {'name': 'some name, no version'}, status=400) def test_create_artifact_no_such_version(self): """Creation impossible without specifying a correct version. An attempt to create an artifact out of existing plugin but with a wrong version should result in 400 BadRequest 'No such plugin has been loaded' """ # make sure there is no such artifact noprop self._check_artifact_get('/noprop/v0.0.9', 400) artifact_data = {'name': 'artifact-1', 'version': '12'} msg = self._check_artifact_post('/noprop/v0.0.9/drafts', artifact_data, status=400) self.assertIn("No plugin for 'noprop v 0.0.9' has been loaded", msg) def test_create_artifact_no_type_version_specified(self): """Creation impossible without specifying a version. It should not be possible to create an artifact out of existing plugin without specifying any version """ artifact_data = {'name': 'artifact-1', 'version': '12'} self._check_artifact_post('/noprop/drafts', artifact_data, 404) def test_create_artifact_no_properties(self): """Create an artifact with minimum parameters""" artifact_data = {'name': 'artifact-1', 'version': '12'} artifact = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) # verify that all fields have the values expected expected_artifact = { 'state': 'creating', 'name': 'artifact-1', 'version': '12.0.0', 'tags': [], 'visibility': 'private', 'type_name': 'WithProps', 'type_version': '1.0', 'prop1': None, 'prop2': None } for key, value in expected_artifact.items(): self.assertEqual(artifact[key], value, key) def test_create_artifact_with_properties(self): """Create an artifact (with two deployer-defined properties)""" artifact_data = {'name': 'artifact-1', 'version': '12', 'tags': ['gagaga', 'sesese'], 'prop1': 'Arthur Dent', 'prop2': 42} artifact = 
self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) expected_artifact = { 'state': 'creating', 'name': 'artifact-1', 'version': '12.0.0', 'tags': ['gagaga', 'sesese'], 'visibility': 'private', 'type_name': 'WithProps', 'type_version': '1.0', 'prop1': 'Arthur Dent', 'prop2': 42 } for key, value in expected_artifact.items(): self.assertEqual(artifact[key], value, key) def test_create_artifact_not_all_properties(self): """Create artifact with minimal properties. Checks that it is possible to create an artifact by passing all required properties but omitting some not required """ artifact_data = {'name': 'artifact-1', 'version': '12', 'visibility': 'private', 'tags': ['gagaga', 'sesese'], 'prop1': 'i am ok'} artifact = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) expected_artifact = { 'state': 'creating', 'name': 'artifact-1', 'version': '12.0.0', 'tags': ['gagaga', 'sesese'], 'visibility': 'private', 'type_name': 'WithProps', 'type_version': '1.0', 'prop1': 'i am ok', 'prop2': None} for key, value in expected_artifact.items(): self.assertEqual(artifact[key], value, key) # now check creation with no properties specified for prop in ['prop1', 'prop2']: artifact_data.pop(prop, '') artifact = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) for prop in ['prop1', 'prop2']: self.assertIsNone(artifact[prop]) def test_create_artifact_invalid_properties(self): """Any attempt to pass invalid properties should result in 400""" artifact_data = {'name': 'artifact-1', 'version': '12', 'prop1': 1} res = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data, status=400) self.assertIn("Property 'prop1' may not have value '1'", res) artifact_data.pop('prop1') artifact_data['nosuchprop'] = "Random" res = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data, status=400) self.assertIn("Artifact has no property nosuchprop", res) def test_upload_file(self): # Upload some data to an artifact art = 
self._create_artifact('withblob') headers = self._headers({'Content-Type': 'application/octet-stream'}) self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], headers=headers, data='ZZZZZ', status=200) def test_upload_file_with_invalid_content_type(self): art = self._create_artifact('withblob') data = {'data': 'jjjjjj'} res = self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], data=data, status=400) self.assertIn('Invalid Content-Type for work with blob1', res) res = self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'], data=data, status=400) self.assertIn('Invalid Content-Type for work with blob_list', res) def test_upload_list_files(self): art = self._create_artifact('withblob') headers = self._headers({'Content-Type': 'application/octet-stream'}) self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'], headers=headers, data='ZZZZZ', status=200) self._check_artifact_post('/withblob/v1/%s/blob_list' % art['id'], headers=headers, data='YYYYY', status=200) def test_download_file(self): # Download some data from an artifact art = self._create_artifact('withblob') artifact_id = art['id'] headers = self._headers({'Content-Type': 'application/octet-stream'}) self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], headers=headers, data='ZZZZZ', status=200) art = self._check_artifact_get('/withblob/%s' % artifact_id) self.assertEqual(artifact_id, art['id']) self.assertIn('download_link', art['blob1']) data = self._check_artifact_get( '/withblob/%s/blob1/download' % art['id']) self.assertEqual('ZZZZZ', data) def test_file_w_unknown_size(self): # Upload and download data provided by an iterator, thus without # knowing the length in advance art = self._create_artifact('withblob') artifact_id = art['id'] def iterate_string(val): for char in val: yield char headers = self._headers({'Content-Type': 'application/octet-stream'}) self._check_artifact_post('/withblob/v1/%s/blob1' % art['id'], headers=headers, 
data=iterate_string('ZZZZZ'), status=200) art = self._check_artifact_get('/withblob/%s' % artifact_id) self.assertEqual(artifact_id, art['id']) self.assertIn('download_link', art['blob1']) data = self._check_artifact_get( '/withblob/%s/blob1/download' % art['id']) self.assertEqual('ZZZZZ', data) def test_limit(self): artifact_data = {'name': 'artifact-1', 'version': '12'} self._check_artifact_post('/withprops/v1/drafts', artifact_data) artifact_data = {'name': 'artifact-1', 'version': '13'} self._check_artifact_post('/withprops/v1/drafts', artifact_data) result = self._check_artifact_get('/withprops/v1/drafts') self.assertEqual(2, len(result["artifacts"])) result = self._check_artifact_get('/withprops/v1/drafts?limit=1') self.assertEqual(1, len(result["artifacts"])) def _check_sorting_order(self, expected, actual): for e, a in zip(expected, actual): self.assertEqual(e['name'], a['name']) self.assertEqual(e['version'], a['version']) self.assertEqual(e['prop1'], a['prop1']) def test_sort(self): artifact_data = {'name': 'artifact-1', 'version': '12', 'prop1': 'lala'} art1 = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) artifact_data = {'name': 'artifact-2', 'version': '13', 'prop1': 'lala'} art2 = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) artifact_data = {'name': 'artifact-3', 'version': '13', 'prop1': 'tutu'} art3 = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) artifact_data = {'name': 'artifact-4', 'version': '13', 'prop1': 'hyhy'} art4 = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) artifact_data = {'name': 'artifact-5', 'version': '13', 'prop1': 'bebe'} art5 = self._check_artifact_post('/withprops/v1.0/drafts', artifact_data) result = self._check_artifact_get( '/withprops/v1.0/drafts?sort=name')["artifacts"] self.assertEqual(5, len(result)) # default direction is 'desc' expected = [art5, art4, art3, art2, art1] self._check_sorting_order(expected, result) result = 
self._check_artifact_get( '/withprops/v1.0/drafts?sort=name:asc')["artifacts"] self.assertEqual(5, len(result)) expected = [art1, art2, art3, art4, art5] self._check_sorting_order(expected, result) result = self._check_artifact_get( '/withprops/v1.0/drafts?sort=version:asc,prop1')["artifacts"] self.assertEqual(5, len(result)) expected = [art1, art3, art2, art4, art5] self._check_sorting_order(expected, result) def test_update_property(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) # update single integer property via PUT upd = self._check_artifact_put('/withprops/v1.0/%s/prop2' % art['id'], data={'data': 15}) self.assertEqual(15, upd['prop2']) # create list property via PUT upd = self._check_artifact_put( '/withprops/v1.0/%s/tuple_prop' % art['id'], data={'data': [42, True]}) self.assertEqual([42, True], upd['tuple_prop']) # change list property via PUT upd = self._check_artifact_put( '/withprops/v1.0/%s/tuple_prop/0' % art['id'], data={'data': 24}) self.assertEqual([24, True], upd['tuple_prop']) # append to list property via POST upd = self._check_artifact_post( '/withprops/v1.0/%s/prop_list' % art['id'], data={'data': [11]}, status=200) self.assertEqual([11], upd['prop_list']) # append to list property via POST upd = self._check_artifact_post( '/withprops/v1.0/%s/prop_list/-' % art['id'], status=200, data={'data': 10}) self.assertEqual([11, 10], upd['prop_list']) def test_bad_update_property(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) # try to update nonexistent property upd = self._check_artifact_put( '/withprops/v1.0/%s/nosuchprop' % art['id'], data={'data': 'wont be set'}, status=400) self.assertIn('Artifact has no property nosuchprop', upd) # try to pass wrong property value upd = self._check_artifact_put( '/withprops/v1.0/%s/tuple_prop' % art['id'], data={'data': ['should be an int', False]}, status=400) self.assertIn("Property 
'tuple_prop[0]' may not have value", upd) # try to pass bad body (not a valid json) upd = self._check_artifact_put( '/withprops/v1.0/%s/tuple_prop' % art['id'], data="not a json", status=400) self.assertIn("Invalid json body", upd) # try to pass json body invalid under schema upd = self._check_artifact_put( '/withprops/v1.0/%s/tuple_prop' % art['id'], data={"bad": "schema"}, status=400) self.assertIn("Invalid json body", upd) def test_update_different_depths_levels(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) upd = self._check_artifact_post( '/withprops/v1.0/%s/dict_prop' % art['id'], data={'data': {'foo': 'some value'}}, status=200) self.assertEqual({'foo': 'some value'}, upd['dict_prop']) upd = self._check_artifact_post( '/withprops/v1.0/%s/dict_prop/bar_list' % art['id'], data={'data': [5]}, status=200) self.assertEqual({'foo': 'some value', 'bar_list': [5]}, upd['dict_prop']) upd = self._check_artifact_post( '/withprops/v1.0/%s/dict_prop/bar_list/0' % art['id'], data={'data': 15}, status=200) self.assertEqual({'foo': 'some value', 'bar_list': [5, 15]}, upd['dict_prop']) # try to attempt dict_property by nonexistent path upd = self._check_artifact_post( '/withprops/v1.0/%s/dict_prop/bar_list/nosuchkey' % art['id'], data={'data': 15}, status=400) def test_artifact_inaccessible_by_different_user(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) self._set_user('user2') self._check_artifact_get('/withprops/%s' % art['id'], 404) def test_artifact_accessible_by_admin(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) self._set_user('admin') self._check_artifact_get('/withprops/%s' % art['id'], 200) def test_public_artifact_accessible_by_different_user(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) self._check_artifact_patch( 
'/withprops/v1.0/%s' % art['id'], data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) self._set_user('user2') self._check_artifact_get('/withprops/%s' % art['id'], 200) def test_public_artifact_not_editable_by_different_user(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) self._check_artifact_patch( '/withprops/v1.0/%s' % art['id'], data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) self._set_user('user2') self._check_artifact_patch( '/withprops/v1.0/%s' % art['id'], data=[{'op': 'replace', 'value': 'private', 'path': '/visibility'}], status=403) def test_public_artifact_editable_by_admin(self): data = {'name': 'an artifact', 'version': '42'} art = self._create_artifact('withprops', data=data) self._check_artifact_patch( '/withprops/v1.0/%s' % art['id'], data=[{'op': 'replace', 'value': 'public', 'path': '/visibility'}]) self._set_user('admin') self._check_artifact_patch( '/withprops/v1.0/%s' % art['id'], data=[{'op': 'replace', 'value': 'private', 'path': '/visibility'}], status=200) def test_list_artifact_types(self): actual = { u'artifact_types': [ {u'displayed_name': u'NoProp', u'type_name': u'NoProp', u'versions': [{u'id': u'v0.5', u'link': u'http://127.0.0.1:%d/v0.1/' u'artifacts/noprop/v0.5' % self.api_port}, {u'id': u'v1.0', u'link': u'http://127.0.0.1:%d/v0.1/' u'artifacts/noprop/v1.0' % self.api_port}]}, {u'displayed_name': u'WithBlob', u'type_name': u'WithBlob', u'versions': [{u'id': u'v1.0', u'link': u'http://127.0.0.1:%d/v0.1/artifacts/withblob/v1.0' % self.api_port}]}, {u'displayed_name': u'WithProps', u'type_name': u'WithProps', u'versions': [{u'id': u'v1.0', u'link': u'http://127.0.0.1:%d/v0.1/artifacts/withprops/v1.0' % self.api_port}]}]} response = self._check_artifact_get("", status=200) response[u'artifact_types'].sort(key=lambda x: x[u'type_name']) for artifact_type in response[u'artifact_types']: artifact_type[u'versions'].sort(key=lambda x: x[u'id']) 
self.assertEqual(actual, response) def test_invalid_content_type(self): data = {'name': 'name1', 'version': '2.2'} self._check_artifact_post('/withprops/v1.0/drafts', data=data, status=400, headers={'Content-Type': 'lalala'}) def test_filter_by_non_dict_props(self): data = {'name': 'art1', 'version': '4.2', 'prop2': 12 } self._create_artifact('withprops', data=data) data = {'name': 'art2', 'version': '4.2', 'prop2': 10 } self._create_artifact('withprops', data=data) data = {'name': 'art3', 'version': '4.2', 'prop2': 10 } self._create_artifact('withprops', data=data) data = {'name': 'art4', 'version': '4.3', 'prop2': 33 } self._create_artifact('withprops', data=data) result = self._check_artifact_get( '/withprops/v1.0/drafts?name=art2')['artifacts'] self.assertEqual(1, len(result)) result = self._check_artifact_get( '/withprops/v1.0/drafts?prop2=10')['artifacts'] self.assertEqual(2, len(result)) def test_filter_by_dict_props(self): data = {'name': 'art1', 'version': '4.2', 'dict_prop': {'foo': 'Moscow', 'bar_list': [42, 44]} } self._create_artifact('withprops', data=data) data = {'name': 'art2', 'version': '4.2', 'dict_prop': {'foo': 'Saratov', 'bar_list': [42, 42]} } self._create_artifact('withprops', data=data) data = {'name': 'art3', 'version': '4.2', 'dict_prop': {'foo': 'Saratov', 'bar_list': [42, 44]} } self._create_artifact('withprops', data=data) url = '/withprops/v1.0/drafts?dict_prop.foo=Saratov' result = self._check_artifact_get(url=url) self.assertEqual(2, len(result)) url = '/withprops/v1.0/drafts?dict_prop.bar_list=44' result = self._check_artifact_get(url=url) self.assertEqual(2, len(result)) def test_transformation_versions(self): data = {'name': 'art1', 'version': '1'} art1 = self._create_artifact('noprop', data=data) data = {'name': 'art2', 'version': '1.0'} art2 = self._create_artifact('noprop', data=data) v1 = art1.get("version") v2 = art2.get("version") self.assertEqual('1.0.0', v1) self.assertEqual('1.0.0', v2) def 
test_filter_by_ge_version(self): data = {'name': 'art1', 'version': '4.0.0'} art1 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.0.1'} art2 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-1'} art3 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art4 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0'} art5 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '5.0.0'} art6 = self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.0.1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art2, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:5.0.0' result = self._check_artifact_get(url=url)['artifacts'] actual = [art6] self.assertEqual(actual, 
result) def test_filter_by_gt_version(self): data = {'name': 'art1', 'version': '4.0.0'} self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.0.1'} art2 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-1'} art3 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art4 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0'} art5 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '5.0.0'} art6 = self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=gt:4.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art2, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=gt:4.0.1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=gt:4.2.0' result = self._check_artifact_get(url=url)['artifacts'] actual = [art6] self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=gt:5.0.0' result = self._check_artifact_get(url=url)['artifacts'] actual = [] self.assertEqual(actual, result) def test_filter_by_le_version(self): data = {'name': 'art1', 'version': '4.0.0'} art1 = 
self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.0.1'} art2 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-1'} art3 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art4 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0'} art5 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '5.0.0'} art6 = self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=le:4.0.0' result = self._check_artifact_get(url=url)['artifacts'] actual = [art1] self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.0.1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art5] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:5.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) def test_filter_by_lt_version(self): data = {'name': 'art1', 'version': 
'4.0.0'} art1 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.0.1'} art2 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-1'} art3 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art4 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0'} art5 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '5.0.0'} self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=lt:4.0.0' result = self._check_artifact_get(url=url)['artifacts'] actual = [] self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=lt:4.0.1' result = self._check_artifact_get(url=url)['artifacts'] actual = [art1] self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=lt:4.2.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=lt:5.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art5] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) def test_filter_by_ne_version(self): data = {'name': 'art1', 'version': '4.0.0'} art1 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': 
'4.0.1'} art2 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-1'} art3 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art4 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0'} art5 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '5.0.0'} art6 = self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=ne:4.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art2, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ne:4.0.1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art3, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art4, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art5, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ne:4.2.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art6] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ne:5.0.0' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2, art3, art4, art5] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) def 
test_filter_by_pre_release_version(self): data = {'name': 'art1', 'version': '4.2.0-1'} art1 = self._create_artifact('noprop', data=data) data = {'name': 'art1', 'version': '4.2.0-2'} art2 = self._create_artifact('noprop', data=data) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] actual = [art2] self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-2' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=ge:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] result.sort(key=lambda x: x['id']) actual = [art1, art2] actual.sort(key=lambda x: x['id']) self.assertEqual(actual, result) url = '/noprop/v1.0/drafts?name=art1&version=le:4.2.0-1' result = self._check_artifact_get(url=url)['artifacts'] actual = [art1] self.assertEqual(actual, result) def test_filter_by_range_props(self): data = {'name': 'art1', 'version': '4.2', 'prop2': 10 } self._create_artifact('withprops', data=data) data = {'name': 'art2', 'version': '4.2', 'prop2': 100 } self._create_artifact('withprops', data=data) data = {'name': 'art3', 'version': '4.2', 'prop2': 1000 } self._create_artifact('withprops', data=data) url = '/withprops/v1.0/drafts?prop2=gt:99&prop2=lt:101' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(1, len(result)) url = '/withprops/v1.0/drafts?prop2=gt:99&prop2=lt:2000' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(2, len(result)) def test_filter_by_tags(self): data = {'name': 'art1', 'version': '4.2', 'tags': ['hyhyhy', 'tytyty'] } self._create_artifact('withprops', data=data) data = {'name': 'art2', 'version': '4.2', 'tags': ['hyhyhy', 'cicici'] } self._create_artifact('withprops', data=data) data = {'name': 'art3', 'version': '4.2', 'tags': 
['ededed', 'bobobo'] } self._create_artifact('withprops', data=data) url = '/withprops/v1.0/drafts?tags=hyhyhy' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(2, len(result)) url = '/withprops/v1.0/drafts?tags=cicici&tags=hyhyhy' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(1, len(result)) def test_filter_by_latest_version(self): data = {'name': 'art1', 'version': '1.2', 'tags': ['hyhyhy', 'tytyty'] } self._create_artifact('withprops', data=data) data = {'name': 'latest_artifact', 'version': '3.2', 'tags': ['hyhyhy', 'cicici'] } self._create_artifact('withprops', data=data) data = {'name': 'latest_artifact', 'version': '3.2', 'tags': ['ededed', 'bobobo'] } self._create_artifact('withprops', data=data) url = '/withprops/v1.0/drafts?version=latest&name=latest_artifact' result = self._check_artifact_get(url=url) self.assertEqual(2, len(result)) url = '/withprops/v1.0/drafts?version=latest' self._check_artifact_get(url=url, status=400) def test_filter_by_version_only(self): data = {'name': 'art1', 'version': '3.2' } self._create_artifact('withprops', data=data) data = {'name': 'art2', 'version': '4.2' } self._create_artifact('withprops', data=data) data = {'name': 'art3', 'version': '4.3' } self._create_artifact('withprops', data=data) url = '/withprops/v1.0/drafts?version=gt:4.0&version=lt:10.1' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(2, len(result)) url = '/withprops/v1.0/drafts?version=gt:4.0&version=ne:4.3' result = self._check_artifact_get(url=url)['artifacts'] self.assertEqual(1, len(result)) def test_operation_patch_with_blob(self): data = {'name': 'art1', 'version': '3.2' } art = self._create_artifact('withblob', data=data) msg = 'Invalid request PATCH for work with blob' result = self._check_artifact_patch( '/withblob/v1.0/%s' % art['id'], status=400, data=[{'op': 'replace', 'value': 'public', 'path': '/blob1'}]) self.assertIn(msg, result) result = 
self._check_artifact_patch( '/withblob/v1.0/%s' % art['id'], status=400, data=[{'op': 'remove', 'value': 'public', 'path': '/blob1'}]) self.assertIn(msg, result) result = self._check_artifact_patch( '/withblob/v1.0/%s' % art['id'], status=400, data=[{'op': 'add', 'value': 'public', 'path': '/blob1'}]) self.assertIn(msg, result) def test_filter_by_bad_version(self): bad_versions = ['kkk', '1.k', 'h.0', '1.3.hf', 's.9.2s2'] response_string = ('The format of the version %s is not valid. ' 'Use semver notation') for bad_version in bad_versions: url = '/withprops/v1.0/drafts?version=gt:%s' % bad_version result = self._check_artifact_get(url=url, status=400) self.assertIn(response_string % bad_version, result) def test_circular_dependency(self): data = {'name': 'artifact', 'version': '12'} art = self._create_artifact('withprops', data=data) upd = self._check_artifact_post( '/withprops/v1.0/%s/depends_on' % art['id'], data={'data': art['id']}, status=400) self.assertIn( 'Artifact with a circular dependency can not be created', upd) glance-12.0.0/glance/tests/functional/test_client_exceptions.py0000664000567000056710000001045512701407047026060 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Functional test asserting strongly typed exceptions from glance client""" import eventlet.patcher import httplib2 import webob.dec import webob.exc from glance.common import client from glance.common import exception from glance.common import wsgi from glance.tests import functional from glance.tests import utils eventlet.patcher.monkey_patch(socket=True) class ExceptionTestApp(object): """ Test WSGI application which can respond with multiple kinds of HTTP status codes """ @webob.dec.wsgify def __call__(self, request): path = request.path_qs if path == "/rate-limit": request.response = webob.exc.HTTPRequestEntityTooLarge() elif path == "/rate-limit-retry": request.response.retry_after = 10 request.response.status = 413 elif path == "/service-unavailable": request.response = webob.exc.HTTPServiceUnavailable() elif path == "/service-unavailable-retry": request.response.retry_after = 10 request.response.status = 503 elif path == "/expectation-failed": request.response = webob.exc.HTTPExpectationFailed() elif path == "/server-error": request.response = webob.exc.HTTPServerError() elif path == "/server-traceback": raise exception.ServerError() class TestClientExceptions(functional.FunctionalTest): def setUp(self): super(TestClientExceptions, self).setUp() self.port = utils.get_unused_port() server = wsgi.Server() self.config(bind_host='127.0.0.1') self.config(workers=0) server.start(ExceptionTestApp(), self.port) self.client = client.BaseClient("127.0.0.1", self.port) def _do_test_exception(self, path, exc_type): try: self.client.do_request("GET", path) self.fail('expected %s' % exc_type) except exc_type as e: if 'retry' in path: self.assertEqual(10, e.retry_after) def test_rate_limited(self): """ Test rate limited response """ self._do_test_exception('/rate-limit', exception.LimitExceeded) def test_rate_limited_retry(self): """ Test rate limited response with retry """ self._do_test_exception('/rate-limit-retry', exception.LimitExceeded) def 
test_service_unavailable(self): """ Test service unavailable response """ self._do_test_exception('/service-unavailable', exception.ServiceUnavailable) def test_service_unavailable_retry(self): """ Test service unavailable response with retry """ self._do_test_exception('/service-unavailable-retry', exception.ServiceUnavailable) def test_expectation_failed(self): """ Test expectation failed response """ self._do_test_exception('/expectation-failed', exception.UnexpectedStatus) def test_server_error(self): """ Test server error response """ self._do_test_exception('/server-error', exception.ServerError) def test_server_traceback(self): """ Verify that the wsgi server does not return tracebacks to the client on 500 errors (bug 1192132) """ http = httplib2.Http() path = ('http://%s:%d/server-traceback' % ('127.0.0.1', self.port)) response, content = http.request(path, 'GET') self.assertNotIn(b'ServerError', content) self.assertEqual(500, response.status) glance-12.0.0/glance/tests/functional/test_gzip_middleware.py0000664000567000056710000000323112701407047025501 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests gzip middleware.""" import httplib2 from glance.tests import functional from glance.tests import utils class GzipMiddlewareTest(functional.FunctionalTest): @utils.skip_if_disabled def test_gzip_requests(self): self.cleanup() self.start_servers(**self.__dict__.copy()) def request(path, headers=None): # We don't care what version we're using here so, # sticking with latest url = 'http://127.0.0.1:%s/v2/%s' % (self.api_port, path) http = httplib2.Http() return http.request(url, 'GET', headers=headers) # Accept-Encoding: Identity headers = {'Accept-Encoding': 'identity'} response, content = request('images', headers=headers) self.assertIsNone(response.get("-content-encoding")) # Accept-Encoding: gzip headers = {'Accept-Encoding': 'gzip'} response, content = request('images', headers=headers) self.assertEqual('gzip', response.get("-content-encoding")) self.stop_servers() glance-12.0.0/glance/tests/functional/__init__.py0000664000567000056710000010170412701407047023037 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base test class for running non-stubbed tests (functional tests) The FunctionalTest class contains helper methods for starting the API and Registry server, grabbing the logs of each, cleaning up pidfiles, and spinning down the servers. 
""" import atexit import datetime import logging import os import platform import shutil import signal import socket import sys import tempfile import time import fixtures from oslo_serialization import jsonutils # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import six.moves.urllib.parse as urlparse import testtools from glance.common import utils from glance.db.sqlalchemy import api as db_api from glance import tests as glance_tests from glance.tests import utils as test_utils execute, get_unused_port = test_utils.execute, test_utils.get_unused_port tracecmd_osmap = {'Linux': 'strace', 'FreeBSD': 'truss'} class Server(object): """ Class used to easily manage starting and stopping a server during functional test runs. """ def __init__(self, test_dir, port, sock=None): """ Creates a new Server object. :param test_dir: The directory where all test stuff is kept. This is passed from the FunctionalTestCase. :param port: The port to start a server up on. """ self.verbose = True self.debug = True self.no_venv = False self.test_dir = test_dir self.bind_port = port self.conf_file_name = None self.conf_base = None self.paste_conf_base = None self.exec_env = None self.deployment_flavor = '' self.show_image_direct_url = False self.show_multiple_locations = False self.property_protection_file = '' self.enable_v1_api = True self.enable_v2_api = True self.enable_v1_registry = True self.enable_v2_registry = True self.needs_database = False self.log_file = None self.sock = sock self.fork_socket = True self.process_pid = None self.server_module = None self.stop_kill = False self.use_user_token = True self.send_identity_credentials = False def write_conf(self, **kwargs): """ Writes the configuration file for the server to its intended destination. Returns the name of the configuration file and the over-ridden config content (may be useful for populating error messages). 
""" if not self.conf_base: raise RuntimeError("Subclass did not populate config_base!") conf_override = self.__dict__.copy() if kwargs: conf_override.update(**kwargs) # A config file and paste.ini to use just for this test...we don't want # to trample on currently-running Glance servers, now do we? conf_dir = os.path.join(self.test_dir, 'etc') conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name) if os.path.exists(conf_filepath): os.unlink(conf_filepath) paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini") if os.path.exists(paste_conf_filepath): os.unlink(paste_conf_filepath) utils.safe_mkdirs(conf_dir) def override_conf(filepath, overridden): with open(filepath, 'w') as conf_file: conf_file.write(overridden) conf_file.flush() return conf_file.name overridden_core = self.conf_base % conf_override self.conf_file_name = override_conf(conf_filepath, overridden_core) overridden_paste = '' if self.paste_conf_base: overridden_paste = self.paste_conf_base % conf_override override_conf(paste_conf_filepath, overridden_paste) overridden = ('==Core config==\n%s\n==Paste config==\n%s' % (overridden_core, overridden_paste)) return self.conf_file_name, overridden def start(self, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts the server. Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. 
""" # Ensure the configuration file is written self.write_conf(**kwargs) self.create_database() cmd = ("%(server_module)s --config-file %(conf_file_name)s" % {"server_module": self.server_module, "conf_file_name": self.conf_file_name}) cmd = "%s -m %s" % (sys.executable, cmd) # close the sock and release the unused port closer to start time if self.exec_env: exec_env = self.exec_env.copy() else: exec_env = {} pass_fds = set() if self.sock: if not self.fork_socket: self.sock.close() self.sock = None else: fd = os.dup(self.sock.fileno()) exec_env[utils.GLANCE_TEST_SOCKET_FD_STR] = str(fd) pass_fds.add(fd) self.sock.close() self.process_pid = test_utils.fork_exec(cmd, logfile=os.devnull, exec_env=exec_env, pass_fds=pass_fds) self.stop_kill = not expect_exit if self.pid_file: pf = open(self.pid_file, 'w') pf.write('%d\n' % self.process_pid) pf.close() if not expect_exit: rc = 0 try: os.kill(self.process_pid, 0) except OSError: raise RuntimeError("The process did not start") else: rc = test_utils.wait_for_fork( self.process_pid, expected_exitcode=expected_exitcode) # avoid an FD leak if self.sock: os.close(fd) self.sock = None return (rc, '', '') def reload(self, expect_exit=True, expected_exitcode=0, **kwargs): """ Start and stop the service to reload Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. 
""" self.stop() return self.start(expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) def create_database(self): """Create database if required for this server""" if self.needs_database: conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') with open(conf_filepath, 'w') as conf_file: conf_file.write('[DEFAULT]\n') conf_file.write('sql_connection = %s' % self.sql_connection) conf_file.flush() glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE' if glance_db_env in os.environ: # use the empty db created and cached as a tempfile # instead of spending the time creating a new one db_location = os.environ[glance_db_env] os.system('cp %s %s/tests.sqlite' % (db_location, self.test_dir)) else: cmd = ('%s -m glance.cmd.manage --config-file %s db sync' % (sys.executable, conf_filepath)) execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env, expect_exit=True) # copy the clean db to a temp location so that it # can be reused for future tests (osf, db_location) = tempfile.mkstemp() os.close(osf) os.system('cp %s/tests.sqlite %s' % (self.test_dir, db_location)) os.environ[glance_db_env] = db_location # cleanup the temp file when the test suite is # complete def _delete_cached_db(): try: os.remove(os.environ[glance_db_env]) except Exception: glance_tests.logger.exception( "Error cleaning up the file %s" % os.environ[glance_db_env]) atexit.register(_delete_cached_db) def stop(self): """ Spin down the server. """ if not self.process_pid: raise Exception('why is this being called? 
%s' % self.server_name) if self.stop_kill: os.kill(self.process_pid, signal.SIGTERM) rc = test_utils.wait_for_fork(self.process_pid, raise_error=False) return (rc, '', '') def dump_log(self, name): log = logging.getLogger(name) if not self.log_file or not os.path.exists(self.log_file): return fptr = open(self.log_file, 'r') for line in fptr: log.info(line.strip()) class ApiServer(Server): """ Server object that starts/stops/manages the API server """ def __init__(self, test_dir, port, policy_file, delayed_delete=False, pid_file=None, sock=None, **kwargs): super(ApiServer, self).__init__(test_dir, port, sock=sock) self.server_name = 'api' self.server_module = 'glance.cmd.%s' % self.server_name self.default_store = kwargs.get("default_store", "file") self.key_file = "" self.cert_file = "" self.metadata_encryption_key = "012345678901234567890123456789ab" self.image_dir = os.path.join(self.test_dir, "images") self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid") self.log_file = os.path.join(self.test_dir, "api.log") self.image_size_cap = 1099511627776 self.delayed_delete = delayed_delete self.owner_is_tenant = True self.workers = 0 self.scrub_time = 5 self.image_cache_dir = os.path.join(self.test_dir, 'cache') self.image_cache_driver = 'sqlite' self.policy_file = policy_file self.policy_default_rule = 'default' self.property_protection_rule_format = 'roles' self.image_member_quota = 10 self.image_property_quota = 10 self.image_tag_quota = 10 self.image_location_quota = 2 self.disable_path = None self.needs_database = True default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.data_api = kwargs.get("data_api", "glance.db.sqlalchemy.api") self.user_storage_quota = '0' self.lock_path = self.test_dir self.location_strategy = 'location_order' self.store_type_location_strategy_preference = "" self.send_identity_headers = False self.conf_base = 
"""[DEFAULT] verbose = %(verbose)s debug = %(debug)s default_log_levels = eventlet.wsgi.server=DEBUG bind_host = 127.0.0.1 bind_port = %(bind_port)s key_file = %(key_file)s cert_file = %(cert_file)s metadata_encryption_key = %(metadata_encryption_key)s registry_host = 127.0.0.1 registry_port = %(registry_port)s use_user_token = %(use_user_token)s send_identity_credentials = %(send_identity_credentials)s log_file = %(log_file)s image_size_cap = %(image_size_cap)d delayed_delete = %(delayed_delete)s owner_is_tenant = %(owner_is_tenant)s workers = %(workers)s scrub_time = %(scrub_time)s send_identity_headers = %(send_identity_headers)s image_cache_dir = %(image_cache_dir)s image_cache_driver = %(image_cache_driver)s data_api = %(data_api)s sql_connection = %(sql_connection)s show_image_direct_url = %(show_image_direct_url)s show_multiple_locations = %(show_multiple_locations)s user_storage_quota = %(user_storage_quota)s enable_v1_api = %(enable_v1_api)s enable_v2_api = %(enable_v2_api)s lock_path = %(lock_path)s property_protection_file = %(property_protection_file)s property_protection_rule_format = %(property_protection_rule_format)s image_member_quota=%(image_member_quota)s image_property_quota=%(image_property_quota)s image_tag_quota=%(image_tag_quota)s image_location_quota=%(image_location_quota)s location_strategy=%(location_strategy)s allow_additional_image_properties = True [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s [paste_deploy] flavor = %(deployment_flavor)s [store_type_location_strategy] store_type_preference = %(store_type_location_strategy_preference)s [glance_store] filesystem_store_datadir=%(image_dir)s default_store = %(default_store)s """ self.paste_conf_base = """[pipeline:glance-api] pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context rootapp [pipeline:glance-api-caching] pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context cache rootapp 
[pipeline:glance-api-cachemanagement] pipeline = cors healthcheck versionnegotiation gzip unauthenticated-context cache cache_manage rootapp [pipeline:glance-api-fakeauth] pipeline = cors healthcheck versionnegotiation gzip fakeauth context rootapp [pipeline:glance-api-noauth] pipeline = cors healthcheck versionnegotiation gzip context rootapp [composite:rootapp] paste.composite_factory = glance.api:root_app_factory /: apiversions /v1: apiv1app /v2: apiv2app [app:apiversions] paste.app_factory = glance.api.versions:create_resource [app:apiv1app] paste.app_factory = glance.api.v1.router:API.factory [app:apiv2app] paste.app_factory = glance.api.v2.router:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = %(disable_path)s [filter:versionnegotiation] paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory [filter:gzip] paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory [filter:cache_manage] paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory allowed_origin=http://valid.example.com """ class RegistryServer(Server): """ Server object that starts/stops/manages the Registry server """ def __init__(self, test_dir, port, policy_file, sock=None): super(RegistryServer, self).__init__(test_dir, port, sock=sock) self.server_name = 'registry' self.server_module = 'glance.cmd.%s' % self.server_name self.needs_database = True 
default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.pid_file = os.path.join(self.test_dir, "registry.pid") self.log_file = os.path.join(self.test_dir, "registry.log") self.owner_is_tenant = True self.workers = 0 self.api_version = 1 self.user_storage_quota = '0' self.metadata_encryption_key = "012345678901234567890123456789ab" self.policy_file = policy_file self.policy_default_rule = 'default' self.disable_path = None self.conf_base = """[DEFAULT] verbose = %(verbose)s debug = %(debug)s bind_host = 127.0.0.1 bind_port = %(bind_port)s log_file = %(log_file)s sql_connection = %(sql_connection)s sql_idle_timeout = 3600 api_limit_max = 1000 limit_param_default = 25 owner_is_tenant = %(owner_is_tenant)s enable_v2_registry = %(enable_v2_registry)s workers = %(workers)s user_storage_quota = %(user_storage_quota)s metadata_encryption_key = %(metadata_encryption_key)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s [paste_deploy] flavor = %(deployment_flavor)s """ self.paste_conf_base = """[pipeline:glance-registry] pipeline = healthcheck unauthenticated-context registryapp [pipeline:glance-registry-fakeauth] pipeline = healthcheck fakeauth context registryapp [pipeline:glance-registry-trusted-auth] pipeline = healthcheck context registryapp [app:registryapp] paste.app_factory = glance.registry.api:API.factory [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = %(disable_path)s [filter:context] paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory [filter:unauthenticated-context] paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory [filter:fakeauth] paste.filter_factory = glance.tests.utils:FakeAuthMiddleware.factory """ class ScrubberDaemon(Server): """ Server object that 
starts/stops/manages the Scrubber server """ def __init__(self, test_dir, policy_file, daemon=False, **kwargs): # NOTE(jkoelker): Set the port to 0 since we actually don't listen super(ScrubberDaemon, self).__init__(test_dir, 0) self.server_name = 'scrubber' self.server_module = 'glance.cmd.%s' % self.server_name self.daemon = daemon self.image_dir = os.path.join(self.test_dir, "images") self.scrub_time = 5 self.pid_file = os.path.join(self.test_dir, "scrubber.pid") self.log_file = os.path.join(self.test_dir, "scrubber.log") self.metadata_encryption_key = "012345678901234567890123456789ab" self.lock_path = self.test_dir default_sql_connection = 'sqlite:////%s/tests.sqlite' % self.test_dir self.sql_connection = os.environ.get('GLANCE_TEST_SQL_CONNECTION', default_sql_connection) self.policy_file = policy_file self.policy_default_rule = 'default' self.send_identity_headers = False self.admin_role = 'admin' self.conf_base = """[DEFAULT] verbose = %(verbose)s debug = %(debug)s log_file = %(log_file)s daemon = %(daemon)s wakeup_time = 2 scrub_time = %(scrub_time)s registry_host = 127.0.0.1 registry_port = %(registry_port)s metadata_encryption_key = %(metadata_encryption_key)s lock_path = %(lock_path)s sql_connection = %(sql_connection)s sql_idle_timeout = 3600 send_identity_headers = %(send_identity_headers)s admin_role = %(admin_role)s [glance_store] filesystem_store_datadir=%(image_dir)s [oslo_policy] policy_file = %(policy_file)s policy_default_rule = %(policy_default_rule)s """ def start(self, expect_exit=True, expected_exitcode=0, **kwargs): if 'daemon' in kwargs: expect_exit = False return super(ScrubberDaemon, self).start( expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) class FunctionalTest(test_utils.BaseTestCase): """ Base test class for any test that wants to test the actual servers and clients and not just the stubbed out interfaces """ inited = False disabled = False launched_servers = [] def setUp(self): super(FunctionalTest, 
self).setUp() self.test_dir = self.useFixture(fixtures.TempDir()).path self.api_protocol = 'http' self.api_port, api_sock = test_utils.get_unused_port_and_socket() self.registry_port, reg_sock = test_utils.get_unused_port_and_socket() self.tracecmd = tracecmd_osmap.get(platform.system()) conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) self.copy_data_file('schema-image.json', conf_dir) self.copy_data_file('policy.json', conf_dir) self.copy_data_file('property-protections.conf', conf_dir) self.copy_data_file('property-protections-policies.conf', conf_dir) self.property_file_roles = os.path.join(conf_dir, 'property-protections.conf') property_policies = 'property-protections-policies.conf' self.property_file_policies = os.path.join(conf_dir, property_policies) self.policy_file = os.path.join(conf_dir, 'policy.json') self.api_server = ApiServer(self.test_dir, self.api_port, self.policy_file, sock=api_sock) self.registry_server = RegistryServer(self.test_dir, self.registry_port, self.policy_file, sock=reg_sock) self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file) self.pid_files = [self.api_server.pid_file, self.registry_server.pid_file, self.scrubber_daemon.pid_file] self.files_to_destroy = [] self.launched_servers = [] def tearDown(self): if not self.disabled: self.cleanup() # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self._reset_database(self.registry_server.sql_connection) self._reset_database(self.api_server.sql_connection) super(FunctionalTest, self).tearDown() self.api_server.dump_log('api_server') self.registry_server.dump_log('registry_server') self.scrubber_daemon.dump_log('scrubber_daemon') def set_policy_rules(self, rules): fap = open(self.policy_file, 'w') fap.write(jsonutils.dumps(rules)) fap.close() def _reset_database(self, conn_string): conn_pieces = urlparse.urlparse(conn_string) if conn_string.startswith('sqlite'): # 
We leave behind the sqlite DB for failing tests to aid # in diagnosis, as the file size is relatively small and # won't interfere with subsequent tests as it's in a per- # test directory (which is blown-away if the test is green) pass elif conn_string.startswith('mysql'): # We can execute the MySQL client to destroy and re-create # the MYSQL database, which is easier and less error-prone # than using SQLAlchemy to do this via MetaData...trust me. database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" if len(auth_pieces) > 1: if auth_pieces[1].strip(): password = "-p%s" % auth_pieces[1] sql = ("drop database if exists %(database)s; " "create database %(database)s;") % {'database': database} cmd = ("mysql -u%(user)s %(password)s -h%(host)s " "-e\"%(sql)s\"") % {'user': user, 'password': password, 'host': host, 'sql': sql} exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) def cleanup(self): """ Makes sure anything we created or started up in the tests are destroyed or spun down """ # NOTE(jbresnah) call stop on each of the servers instead of # checking the pid file. stop() will wait until the child # server is dead. This eliminates the possibility of a race # between a child process listening on a port actually dying # and a new process being started servers = [self.api_server, self.registry_server, self.scrubber_daemon] for s in servers: try: s.stop() except Exception: pass for f in self.files_to_destroy: if os.path.exists(f): os.unlink(f) def start_server(self, server, expect_launch, expect_exit=True, expected_exitcode=0, **kwargs): """ Starts a server on an unused port. Any kwargs passed to this method will override the configuration value in the conf file used in starting the server. 
:param server: the server to launch :param expect_launch: true iff the server is expected to successfully start :param expect_exit: true iff the launched process is expected to exit in a timely fashion :param expected_exitcode: expected exitcode from the launcher """ self.cleanup() # Start up the requested server exitcode, out, err = server.start(expect_exit=expect_exit, expected_exitcode=expected_exitcode, **kwargs) if expect_exit: self.assertEqual(expected_exitcode, exitcode, "Failed to spin up the requested server. " "Got: %s" % err) self.launched_servers.append(server) launch_msg = self.wait_for_servers([server], expect_launch) self.assertTrue(launch_msg is None, launch_msg) def start_with_retry(self, server, port_name, max_retries, expect_launch=True, **kwargs): """ Starts a server, with retries if the server launches but fails to start listening on the expected port. :param server: the server to launch :param port_name: the name of the port attribute :param max_retries: the maximum number of attempts :param expect_launch: true iff the server is expected to successfully start :param expect_exit: true iff the launched process is expected to exit in a timely fashion """ launch_msg = None for i in range(max_retries): exitcode, out, err = server.start(expect_exit=not expect_launch, **kwargs) name = server.server_name self.assertEqual(0, exitcode, "Failed to spin up the %s server. " "Got: %s" % (name, err)) launch_msg = self.wait_for_servers([server], expect_launch) if launch_msg: server.stop() server.bind_port = get_unused_port() setattr(self, port_name, server.bind_port) else: self.launched_servers.append(server) break self.assertTrue(launch_msg is None, launch_msg) def start_servers(self, **kwargs): """ Starts the API and Registry servers (glance-control api start & glance-control registry start) on unused ports. 
glance-control should be installed into the python path Any kwargs passed to this method will override the configuration value in the conf file used in starting the servers. """ self.cleanup() # Start up the API and default registry server # We start the registry server first, as the API server config # depends on the registry port - this ordering allows for # retrying the launch on a port clash self.start_with_retry(self.registry_server, 'registry_port', 3, **kwargs) kwargs['registry_port'] = self.registry_server.bind_port self.start_with_retry(self.api_server, 'api_port', 3, **kwargs) exitcode, out, err = self.scrubber_daemon.start(**kwargs) self.assertEqual(0, exitcode, "Failed to spin up the Scrubber daemon. " "Got: %s" % err) def ping_server(self, port): """ Simple ping on the port. If responsive, return True, else return False. :note We use raw sockets, not ping here, since ping uses ICMP and has no concept of ports... """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect(("127.0.0.1", port)) s.close() return True except socket.error: return False def wait_for_servers(self, servers, expect_launch=True, timeout=30): """ Tight loop, waiting for the given server port(s) to be available. Returns when all are pingable. There is a timeout on waiting for the servers to come up. 
:param servers: Glance server ports to ping :param expect_launch: Optional, true iff the server(s) are expected to successfully start :param timeout: Optional, defaults to 30 seconds :returns: None if launch expectation is met, otherwise an assertion message """ now = datetime.datetime.now() timeout_time = now + datetime.timedelta(seconds=timeout) replied = [] while (timeout_time > now): pinged = 0 for server in servers: if self.ping_server(server.bind_port): pinged += 1 if server not in replied: replied.append(server) if pinged == len(servers): msg = 'Unexpected server launch status' return None if expect_launch else msg now = datetime.datetime.now() time.sleep(0.05) failed = list(set(servers) - set(replied)) msg = 'Unexpected server launch status for: ' for f in failed: msg += ('%s, ' % f.server_name) if os.path.exists(f.pid_file): pid = f.process_pid trace = f.pid_file.replace('.pid', '.trace') if self.tracecmd: cmd = '%s -p %d -o %s' % (self.tracecmd, pid, trace) execute(cmd, raise_error=False, expect_exit=False) time.sleep(0.5) if os.path.exists(trace): msg += ('\n%s:\n%s\n' % (self.tracecmd, open(trace).read())) self.add_log_details(failed) return msg if expect_launch else None def stop_server(self, server, name): """ Called to stop a single server in a normal fashion using the glance-control stop method to gracefully shut the server down. :param server: the server to stop """ # Spin down the requested server server.stop() def stop_servers(self): """ Called to stop the started servers in a normal fashion. Note that cleanup() will stop the servers using a fairly draconian method of sending a SIGTERM signal to the servers. Here, we use the glance-control stop method to gracefully shut the server down. This method also asserts that the shutdown was clean, and so it is meant to be called during a normal test case sequence. 
""" # Spin down the API and default registry server self.stop_server(self.api_server, 'API server') self.stop_server(self.registry_server, 'Registry server') self.stop_server(self.scrubber_daemon, 'Scrubber daemon') self._reset_database(self.registry_server.sql_connection) def run_sql_cmd(self, sql): """ Provides a crude mechanism to run manual SQL commands for backend DB verification within the functional tests. The raw result set is returned. """ engine = db_api.get_engine() return engine.execute(sql) def copy_data_file(self, file_name, dst_dir): src_file_name = os.path.join('glance/tests/etc', file_name) shutil.copy(src_file_name, dst_dir) dst_file_name = os.path.join(dst_dir, file_name) return dst_file_name def add_log_details(self, servers=None): logs = [s.log_file for s in (servers or self.launched_servers)] for log in logs: if os.path.exists(log): testtools.content.attach_file(self, log) glance-12.0.0/glance/tests/functional/store_utils.py0000664000567000056710000000526312701407047023657 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to set testcases up for Swift and/or S3 tests. 
""" from __future__ import print_function import threading from oslo_utils import units from six.moves import BaseHTTPServer FIVE_KB = 5 * units.Ki class RemoteImageHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_HEAD(self): """ Respond to an image HEAD request fake metadata """ if 'images' in self.path: self.send_response(200) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Length', FIVE_KB) self.end_headers() return else: self.send_error(404, 'File Not Found: %s' % self.path) return def do_GET(self): """ Respond to an image GET request with fake image content. """ if 'images' in self.path: self.send_response(200) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Length', FIVE_KB) self.end_headers() image_data = '*' * FIVE_KB self.wfile.write(image_data) self.wfile.close() return else: self.send_error(404, 'File Not Found: %s' % self.path) return def log_message(self, format, *args): """ Simple override to prevent writing crap to stderr... """ pass def setup_http(test): server_class = BaseHTTPServer.HTTPServer remote_server = server_class(('127.0.0.1', 0), RemoteImageHandler) remote_ip, remote_port = remote_server.server_address def serve_requests(httpd): httpd.serve_forever() threading.Thread(target=serve_requests, args=(remote_server,)).start() test.http_server = remote_server test.http_ip = remote_ip test.http_port = remote_port test.addCleanup(test.http_server.shutdown) def get_http_uri(test, image_id): uri = 'http://%(http_ip)s:%(http_port)d/images/' % test.__dict__ uri += image_id return uri glance-12.0.0/glance/tests/functional/test_api.py0000664000567000056710000003061512701407047023112 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Version-independent api tests""" import httplib2 from oslo_serialization import jsonutils from glance.tests import functional class TestApiVersions(functional.FunctionalTest): def test_version_configurations(self): """Test that versioning is handled properly through all channels""" # v1 and v2 api enabled self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d/v%%s/' % self.api_port versions = {'versions': [ { 'id': 'v2.3', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v1.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, { 'id': 'v1.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, ]} versions_json = jsonutils.dumps(versions) # Verify version choices returned. 
path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(versions_json, content) def test_v2_api_configuration(self): self.api_server.enable_v1_api = False self.api_server.enable_v2_api = True self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d/v%%s/' % self.api_port versions = {'versions': [ { 'id': 'v2.3', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, ]} versions_json = jsonutils.dumps(versions) # Verify version choices returned. path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(versions_json, content) def test_v1_api_configuration(self): self.api_server.enable_v1_api = True self.api_server.enable_v2_api = False self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d/v%%s/' % self.api_port versions = {'versions': [ { 'id': 'v1.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, { 'id': 'v1.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, ]} versions_json = jsonutils.dumps(versions) # Verify version choices returned. 
path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(versions_json, content) class TestApiPaths(functional.FunctionalTest): def setUp(self): super(TestApiPaths, self).setUp() self.start_servers(**self.__dict__.copy()) url = 'http://127.0.0.1:%d/v%%s/' % self.api_port versions = {'versions': [ { 'id': 'v2.3', 'status': 'CURRENT', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.2', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v2.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '2'}], }, { 'id': 'v1.1', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, { 'id': 'v1.0', 'status': 'SUPPORTED', 'links': [{'rel': 'self', 'href': url % '1'}], }, ]} self.versions_json = jsonutils.dumps(versions) images = {'images': []} self.images_json = jsonutils.dumps(images) def test_get_root_path(self): """Assert GET / with `no Accept:` header. Verify version choices returned. Bug lp:803260 no Accept header causes a 500 in glance-api """ path = 'http://%s:%d' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_images_path(self): """Assert GET /images with `no Accept:` header. Verify version choices returned. """ path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_v1_images_path(self): """GET /v1/images with `no Accept:` header. Verify empty images list returned. 
""" path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) def test_get_root_path_with_unknown_header(self): """Assert GET / with Accept: unknown header Verify version choices returned. Verify message in API log about unknown accept header. """ path = 'http://%s:%d/' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'unknown'} response, content = http.request(path, 'GET', headers=headers) self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_root_path_with_openstack_header(self): """Assert GET / with an Accept: application/vnd.openstack.images-v1 Verify empty image list returned """ path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'application/vnd.openstack.images-v1'} response, content = http.request(path, 'GET', headers=headers) self.assertEqual(200, response.status) self.assertEqual(self.images_json, content) def test_get_images_path_with_openstack_header(self): """Assert GET /images with a `Accept: application/vnd.openstack.compute-v1` header. Verify version choices returned. Verify message in API log about unknown accept header. 
""" path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'application/vnd.openstack.compute-v1'} response, content = http.request(path, 'GET', headers=headers) self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_v10_images_path(self): """Assert GET /v1.0/images with no Accept: header Verify version choices returned """ path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) def test_get_v1a_images_path(self): """Assert GET /v1.a/images with no Accept: header Verify version choices returned """ path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) def test_get_va1_images_path(self): """Assert GET /va.1/images with no Accept: header Verify version choices returned """ path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_versions_path(self): """Assert GET /versions with no Accept: header Verify version choices returned """ path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(self.versions_json, content) def test_get_versions_path_with_openstack_header(self): """Assert GET /versions with the `Accept: application/vnd.openstack.images-v1` header. Verify version choices returned. 
""" path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'application/vnd.openstack.images-v1'} response, content = http.request(path, 'GET', headers=headers) self.assertEqual(200, response.status) self.assertEqual(self.versions_json, content) def test_get_v1_versions_path(self): """Assert GET /v1/versions with `no Accept:` header Verify 404 returned """ path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) def test_get_versions_choices(self): """Verify version choices returned""" path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_images_path_with_openstack_v2_header(self): """Assert GET /images with a `Accept: application/vnd.openstack.compute-v2` header. Verify version choices returned. Verify message in API log about unknown version in accept header. """ path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() headers = {'Accept': 'application/vnd.openstack.images-v10'} response, content = http.request(path, 'GET', headers=headers) self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) def test_get_v12_images_path(self): """Assert GET /v1.2/images with `no Accept:` header Verify version choices returned """ path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(300, response.status) self.assertEqual(self.versions_json, content) glance-12.0.0/glance/tests/functional/test_logging.py0000664000567000056710000000567412701407047023776 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test case that tests logging output""" import os import stat import httplib2 from glance.tests import functional class TestLogging(functional.FunctionalTest): """Functional tests for Glance's logging output""" def test_verbose_debug(self): """ Test logging output proper when verbose and debug is on. """ self.cleanup() self.start_servers() # The default functional test case has both verbose # and debug on. Let's verify that debug statements # appear in both the API and registry logs. self.assertTrue(os.path.exists(self.api_server.log_file)) api_log_out = open(self.api_server.log_file, 'r').read() self.assertIn('DEBUG glance', api_log_out) self.assertTrue(os.path.exists(self.registry_server.log_file)) registry_log_out = open(self.registry_server.log_file, 'r').read() self.assertIn('DEBUG glance', registry_log_out) self.stop_servers() def test_no_verbose_no_debug(self): """ Test logging output proper when verbose and debug is off. 
""" self.cleanup() self.start_servers(debug=False, verbose=False) self.assertTrue(os.path.exists(self.api_server.log_file)) api_log_out = open(self.api_server.log_file, 'r').read() self.assertNotIn('DEBUG glance', api_log_out) self.assertTrue(os.path.exists(self.registry_server.log_file)) registry_log_out = open(self.registry_server.log_file, 'r').read() self.assertNotIn('DEBUG glance', registry_log_out) self.stop_servers() def assertNotEmptyFile(self, path): self.assertTrue(os.path.exists(path)) self.assertNotEqual(os.stat(path)[stat.ST_SIZE], 0) def test_logrotate(self): """ Test that we notice when our log file has been rotated """ self.cleanup() self.start_servers() self.assertNotEmptyFile(self.api_server.log_file) os.rename(self.api_server.log_file, self.api_server.log_file + ".1") path = "http://%s:%d/" % ("127.0.0.1", self.api_port) response, content = httplib2.Http().request(path, 'GET') self.assertEqual(300, response.status) self.assertNotEmptyFile(self.api_server.log_file) self.stop_servers() glance-12.0.0/glance/tests/functional/test_reload.py0000664000567000056710000002202312701407047023601 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import re import time import psutil import requests from glance.tests import functional from glance.tests.utils import execute TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../', 'var')) def set_config_value(filepath, key, value): """Set 'key = value' in config file""" replacement_line = '%s = %s\n' % (key, value) match = re.compile('^%s\s+=' % key).match with open(filepath, 'r+') as f: lines = f.readlines() f.seek(0, 0) f.truncate() for line in lines: f.write(line if not match(line) else replacement_line) class TestReload(functional.FunctionalTest): """Test configuration reload""" def setUp(self): self.workers = 1 super(TestReload, self).setUp() def tearDown(self): self.stop_servers() super(TestReload, self).tearDown() def ticker(self, message, seconds=60, tick=0.01): """ Allows repeatedly testing for an expected result for a finite amount of time. :param message: Message to display on timeout :param seconds: Time in seconds after which we timeout :param tick: Time to sleep before rechecking for expected result :returns: 'True' or fails the test with 'message' on timeout """ # We default to allowing 60 seconds timeout but # typically only a few hundredths of a second # are needed. 
num_ticks = seconds * (1.0 / tick) count = 0 while count < num_ticks: count += 1 time.sleep(tick) yield self.fail(message) def _get_children(self, server): pid = None pid = self._get_parent(server) process = psutil.Process(pid) children = process.get_children() pids = set() for child in children: pids.add(child.pid) return pids def _get_parent(self, server): if server == 'api': return self.api_server.process_pid elif server == 'registry': return self.registry_server.process_pid def _conffile(self, service): conf_dir = os.path.join(self.test_dir, 'etc') conf_filepath = os.path.join(conf_dir, '%s.conf' % service) return conf_filepath def _url(self, protocol, path): return '%s://127.0.0.1:%d%s' % (protocol, self.api_port, path) def test_reload(self): """Test SIGHUP picks up new config values""" def check_pids(pre, post=None, workers=2): if post is None: if len(pre) == workers: return True else: return False if len(post) == workers: # Check new children have different pids if post.intersection(pre) == set(): return True return False self.api_server.fork_socket = False self.registry_server.fork_socket = False self.start_servers(fork_socket=False, **vars(self)) pre_pids = {} post_pids = {} # Test changing the workers value creates all new children # This recycles the existing socket msg = 'Start timeout' for _ in self.ticker(msg): for server in ('api', 'registry'): pre_pids[server] = self._get_children(server) if check_pids(pre_pids['api'], workers=1): if check_pids(pre_pids['registry'], workers=1): break for server in ('api', 'registry'): # Labour costs have fallen set_config_value(self._conffile(server), 'workers', '2') cmd = "kill -HUP %s" % self._get_parent(server) execute(cmd, raise_error=True) msg = 'Worker change timeout' for _ in self.ticker(msg): for server in ('api', 'registry'): post_pids[server] = self._get_children(server) if check_pids(pre_pids['registry'], post_pids['registry']): if check_pids(pre_pids['api'], post_pids['api']): break # Test changing from 
http to https # This recycles the existing socket path = self._url('http', '/') response = requests.get(path) self.assertEqual(300, response.status_code) del response # close socket so that process audit is reliable pre_pids['api'] = self._get_children('api') key_file = os.path.join(TEST_VAR_DIR, 'privatekey.key') set_config_value(self._conffile('api'), 'key_file', key_file) cert_file = os.path.join(TEST_VAR_DIR, 'certificate.crt') set_config_value(self._conffile('api'), 'cert_file', cert_file) cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'http to https timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') path = self._url('https', '/') response = requests.get(path, verify=ca_file) self.assertEqual(300, response.status_code) del response # Test https restart # This recycles the existing socket pre_pids['api'] = self._get_children('api') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'https restart timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') path = self._url('https', '/') response = requests.get(path, verify=ca_file) self.assertEqual(300, response.status_code) del response # Test changing the https bind_host # This requires a new socket pre_pids['api'] = self._get_children('api') set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'https bind_host timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break path = self._url('https', '/') response = requests.get(path, verify=ca_file) self.assertEqual(300, response.status_code) del response # Test https -> 
http # This recycles the existing socket pre_pids['api'] = self._get_children('api') set_config_value(self._conffile('api'), 'key_file', '') set_config_value(self._conffile('api'), 'cert_file', '') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'https to http timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break path = self._url('http', '/') response = requests.get(path) self.assertEqual(300, response.status_code) del response # Test changing the http bind_host # This requires a new socket pre_pids['api'] = self._get_children('api') set_config_value(self._conffile('api'), 'bind_host', '127.0.0.1') cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'http bind_host timeout' for _ in self.ticker(msg): post_pids['api'] = self._get_children('api') if check_pids(pre_pids['api'], post_pids['api']): break path = self._url('http', '/') response = requests.get(path) self.assertEqual(300, response.status_code) del response # Test logging configuration change # This recycles the existing socket conf_dir = os.path.join(self.test_dir, 'etc') log_file = conf_dir + 'new.log' self.assertFalse(os.path.exists(log_file)) set_config_value(self._conffile('api'), 'log_file', log_file) cmd = "kill -HUP %s" % self._get_parent('api') execute(cmd, raise_error=True) msg = 'No new log file created' for _ in self.ticker(msg): if os.path.exists(log_file): break glance-12.0.0/glance/tests/functional/test_cache_middleware.py0000664000567000056710000011040012701407047025570 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Tests a Glance API server which uses the caching middleware that
uses the default SQLite cache driver. We use the filesystem store,
but that is really not relevant, as the image cache is transparent
to the backend store.
"""

import hashlib
import os
import shutil
import sys
import time

import httplib2
from oslo_serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range

from glance.tests import functional
from glance.tests.functional.store_utils import get_http_uri
from glance.tests.functional.store_utils import setup_http
from glance.tests.utils import execute
from glance.tests.utils import minimal_headers
from glance.tests.utils import skip_if_disabled
from glance.tests.utils import xattr_writes_supported

# 5 KiB of image payload used by every test in this module.
FIVE_KB = 5 * units.Ki


class BaseCacheMiddlewareTest(object):
    # Mixin exercising transparent image caching through the API pipeline.
    # Concrete subclasses (below) supply the cache driver via setUp().

    @skip_if_disabled
    def test_cache_middleware_transparent_v1(self):
        """
        We test that putting the cache middleware into the
        application pipeline gives us transparent image caching
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        # Add an image and verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])

        image_id = data['image']['id']

        # Verify image not in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)
        self.assertFalse(os.path.exists(image_cached_path))

        # Grab the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image now in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)

        # You might wonder why the heck this is here... well, it's here
        # because it took me forever to figure out that the disk write
        # cache in Linux was causing random failures of the os.path.exists
        # assert directly below this. Basically, since the cache is writing
        # the image file to disk in a different process, the write buffers
        # don't flush the cache file during an os.rename() properly, resulting
        # in a false negative on the file existence check below. This little
        # loop pauses the execution of this process for no more than 1.5
        # seconds. If after that time the cached image file still doesn't
        # appear on disk, something really is wrong, and the assert should
        # trigger...
        i = 0
        while not os.path.exists(image_cached_path) and i < 30:
            time.sleep(0.05)
            i = i + 1

        self.assertTrue(os.path.exists(image_cached_path))

        # Now, we delete the image from the server and verify that
        # the image cache no longer contains the deleted image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        self.assertFalse(os.path.exists(image_cached_path))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_middleware_transparent_v2(self):
        """Ensure the v2 API image transfer calls trigger caching"""
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        # Add an image and verify success
        path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
        http = httplib2.Http()
        headers = {'content-type': 'application/json'}
        image_entity = {
            'name': 'Image1',
            'visibility': 'public',
            'container_format': 'bare',
            'disk_format': 'raw',
        }
        response, content = http.request(path, 'POST',
                                         headers=headers,
                                         body=jsonutils.dumps(image_entity))
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['id']

        # Upload the image payload via the v2 /file endpoint
        path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
                                                   image_id)
        headers = {'content-type': 'application/octet-stream'}
        image_data = "*" * FIVE_KB
        response, content = http.request(path, 'PUT',
                                         headers=headers,
                                         body=image_data)
        self.assertEqual(204, response.status)

        # Verify image not in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)
        self.assertFalse(os.path.exists(image_cached_path))

        # Grab the image
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image now in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)

        # Now, we delete the image from the server and verify that
        # the image cache no longer contains the deleted image
        path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(204, response.status)

        self.assertFalse(os.path.exists(image_cached_path))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_remote_image(self):
        """
        We test that caching is no longer broken for remote images
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        setup_http(self)

        # Add a remote image and verify a 201 Created is returned
        remote_uri = get_http_uri(self, '2')
        headers = {'X-Image-Meta-Name': 'Image2',
                   'X-Image-Meta-disk_format': 'raw',
                   'X-Image-Meta-container_format': 'ovf',
                   'X-Image-Meta-Is-Public': 'True',
                   'X-Image-Meta-Location': remote_uri}
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(FIVE_KB, data['image']['size'])

        image_id = data['image']['id']
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)

        # Grab the image
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Grab the image again to ensure it can be served out from
        # cache with the correct size
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual(FIVE_KB, int(response['content-length']))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_middleware_trans_v1_without_download_image_policy(self):
        """
        Ensure the image v1 API image transfer applied 'download_image'
        policy enforcement.
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        # Add an image and verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])

        image_id = data['image']['id']

        # Verify image not in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)
        self.assertFalse(os.path.exists(image_cached_path))

        # Deny download_image so the GET below must be rejected
        rules = {"context_is_admin": "role:admin", "default": "",
                 "download_image": "!"}
        self.set_policy_rules(rules)

        # Grab the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(403, response.status)

        # Now, we delete the image from the server and verify that
        # the image cache no longer contains the deleted image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        self.assertFalse(os.path.exists(image_cached_path))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_middleware_trans_v2_without_download_image_policy(self):
        """
        Ensure the image v2 API image transfer applied 'download_image'
        policy enforcement.
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        # Add an image and verify success
        path = "http://%s:%d/v2/images" % ("0.0.0.0", self.api_port)
        http = httplib2.Http()
        headers = {'content-type': 'application/json'}
        image_entity = {
            'name': 'Image1',
            'visibility': 'public',
            'container_format': 'bare',
            'disk_format': 'raw',
        }
        response, content = http.request(path, 'POST',
                                         headers=headers,
                                         body=jsonutils.dumps(image_entity))
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['id']

        path = "http://%s:%d/v2/images/%s/file" % ("0.0.0.0", self.api_port,
                                                   image_id)
        headers = {'content-type': 'application/octet-stream'}
        image_data = "*" * FIVE_KB
        response, content = http.request(path, 'PUT',
                                         headers=headers,
                                         body=image_data)
        self.assertEqual(204, response.status)

        # Verify image not in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)
        self.assertFalse(os.path.exists(image_cached_path))

        # Deny download_image so the GET below must be rejected
        rules = {"context_is_admin": "role:admin", "default": "",
                 "download_image": "!"}
        self.set_policy_rules(rules)

        # Grab the image
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(403, response.status)

        # Now, we delete the image from the server and verify that
        # the image cache no longer contains the deleted image
        path = "http://%s:%d/v2/images/%s" % ("0.0.0.0", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(204, response.status)

        self.assertFalse(os.path.exists(image_cached_path))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_middleware_trans_with_deactivated_image(self):
        """
        Ensure the image v1/v2 API image transfer forbids downloading
        deactivated images.
        Image deactivation is not available in v1. So, we'll deactivate the
        image using v2 but test image transfer with both v1 and v2.
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        # Add an image and verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])

        image_id = data['image']['id']

        # Grab the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image in cache
        image_cached_path = os.path.join(self.api_server.image_cache_dir,
                                         image_id)
        self.assertTrue(os.path.exists(image_cached_path))

        # Deactivate the image using v2
        path = "http://%s:%d/v2/images/%s/actions/deactivate"
        path = path % ("127.0.0.1", self.api_port, image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'POST')
        self.assertEqual(204, response.status)

        # Download the image with v1. Ensure it is forbidden
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(403, response.status)

        # Download the image with v2. This succeeds because
        # we are in admin context.
        path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port,
                                                   image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Reactivate the image using v2
        path = "http://%s:%d/v2/images/%s/actions/reactivate"
        path = path % ("127.0.0.1", self.api_port, image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'POST')
        self.assertEqual(204, response.status)

        # Download the image with v1. Ensure it is allowed
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Download the image with v2. Ensure it is allowed
        path = "http://%s:%d/v2/images/%s/file" % ("127.0.0.1", self.api_port,
                                                   image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Now, we delete the image from the server and verify that
        # the image cache no longer contains the deleted image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        self.assertFalse(os.path.exists(image_cached_path))

        self.stop_servers()


class BaseCacheManageMiddlewareTest(object):

    """Base test class for testing cache management middleware"""

    def verify_no_images(self):
        # Sanity helper: the registry starts empty before each scenario.
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertIn('images', data)
        self.assertEqual(0, len(data['images']))

    def add_image(self, name):
        """
        Adds an image and returns the newly-added image
        identifier
        """
        image_data = "*" * FIVE_KB
        headers = minimal_headers('%s' % name)
        path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'POST', headers=headers,
                                         body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual(name, data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        return data['image']['id']

    def verify_no_cached_images(self):
        """
        Verify no images in the image cache
        """
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)
        self.assertEqual([], data['cached_images'])

    @skip_if_disabled
    def test_user_not_authorized(self):
        # Once manage_image_cache is denied, all cache-management endpoints
        # must return 403 for the caller.
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        self.verify_no_images()
        image_id1 = self.add_image("Image1")
        image_id2 = self.add_image("Image2")

        # Verify image does not yet show up in cache (we haven't "hit"
        # it yet using a GET /images/1 ...
        self.verify_no_cached_images()

        # Grab the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id1)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image now in cache
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(1, len(cached_images))
        self.assertEqual(image_id1, cached_images[0]['image_id'])

        # Set policy to disallow access to cache management
        rules = {"manage_image_cache": '!'}
        self.set_policy_rules(rules)

        # Verify an unprivileged user cannot see cached images
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(403, response.status)

        # Verify an unprivileged user cannot delete images from the cache
        path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
                                                     self.api_port, image_id1)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(403, response.status)

        # Verify an unprivileged user cannot delete all cached images
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(403, response.status)

        # Verify an unprivileged user cannot queue an image
        path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
                                                     self.api_port, image_id2)
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(403, response.status)

        self.stop_servers()

    @skip_if_disabled
    def test_cache_manage_get_cached_images(self):
        """
        Tests that cached images are queryable
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        self.verify_no_images()
        image_id = self.add_image("Image1")

        # Verify image does not yet show up in cache (we haven't "hit"
        # it yet using a GET /images/1 ...
        self.verify_no_cached_images()

        # Grab the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image now in cache
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        # Verify the last_modified/last_accessed values are valid floats
        for cached_image in data['cached_images']:
            for time_key in ('last_modified', 'last_accessed'):
                time_val = cached_image[time_key]
                try:
                    float(time_val)
                except ValueError:
                    self.fail('%s time %s for cached image %s not a valid '
                              'float' % (time_key, time_val,
                                         cached_image['image_id']))

        cached_images = data['cached_images']
        self.assertEqual(1, len(cached_images))
        self.assertEqual(image_id, cached_images[0]['image_id'])
        self.assertEqual(0, cached_images[0]['hits'])

        # Hit the image
        path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                              image_id)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        # Verify image hits increased in output of manage GET
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(1, len(cached_images))
        self.assertEqual(image_id, cached_images[0]['image_id'])
        self.assertEqual(1, cached_images[0]['hits'])

        self.stop_servers()

    @skip_if_disabled
    def test_cache_manage_delete_cached_images(self):
        """
        Tests that cached images may be deleted
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        self.verify_no_images()

        ids = {}

        # Add a bunch of images...
        for x in range(4):
            ids[x] = self.add_image("Image%s" % str(x))

        # Verify no images in cached_images because no image has been hit
        # yet using a GET /images/ ...
        self.verify_no_cached_images()

        # Grab the images, essentially caching them...
        for x in range(4):
            path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port,
                                                  ids[x])
            http = httplib2.Http()
            response, content = http.request(path, 'GET')
            self.assertEqual(200, response.status,
                             "Failed to find image %s" % ids[x])

        # Verify images now in cache
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(4, len(cached_images))

        # NOTE(review): range(4, 0) is an EMPTY range, so this loop body
        # never executes and the per-entry assertions below are dead code.
        # Presumably reversed(range(4)) or range(3, -1, -1) was intended
        # ("last modified order") — TODO confirm and fix upstream.
        for x in range(4, 0):
            # Cached images returned last modified order
            self.assertEqual(ids[x], cached_images[x]['image_id'])
            self.assertEqual(0, cached_images[x]['hits'])

        # Delete third image of the cached images and verify no longer in cache
        path = "http://%s:%d/v1/cached_images/%s" % ("127.0.0.1",
                                                     self.api_port, ids[2])
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(3, len(cached_images))
        self.assertNotIn(ids[2], [x['image_id'] for x in cached_images])

        # Delete all cached images and verify nothing in cache
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(0, len(cached_images))

        self.stop_servers()

    @skip_if_disabled
    def test_cache_manage_delete_queued_images(self):
        """
        Tests that all queued images may be deleted at once
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())
        self.verify_no_images()

        ids = {}
        NUM_IMAGES = 4

        # Add and then queue some images
        for x in range(NUM_IMAGES):
            ids[x] = self.add_image("Image%s" % str(x))
            path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
                                                         self.api_port,
                                                         ids[x])
            http = httplib2.Http()
            response, content = http.request(path, 'PUT')
            self.assertEqual(200, response.status)

        # Delete all queued images
        path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        num_deleted = data['num_deleted']
        self.assertEqual(NUM_IMAGES, num_deleted)

        # Verify a second delete now returns num_deleted=0
        path = "http://%s:%d/v1/queued_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'DELETE')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        num_deleted = data['num_deleted']
        self.assertEqual(0, num_deleted)

        self.stop_servers()

    @skip_if_disabled
    def test_queue_and_prefetch(self):
        """
        Tests that images may be queued and prefetched
        """
        self.cleanup()
        self.start_servers(**self.__dict__.copy())

        cache_config_filepath = os.path.join(self.test_dir, 'etc',
                                             'glance-cache.conf')
        cache_file_options = {
            'image_cache_dir': self.api_server.image_cache_dir,
            'image_cache_driver': self.image_cache_driver,
            'registry_port': self.registry_server.bind_port,
            'log_file': os.path.join(self.test_dir, 'cache.log'),
            'metadata_encryption_key': "012345678901234567890123456789ab",
            'filesystem_store_datadir': self.test_dir
        }
        with open(cache_config_filepath, 'w') as cache_file:
            cache_file.write("""[DEFAULT]
debug = True
verbose = True
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
log_file = %(log_file)s

[glance_store]
filesystem_store_datadir=%(filesystem_store_datadir)s
""" % cache_file_options)

        self.verify_no_images()

        ids = {}

        # Add a bunch of images...
        for x in range(4):
            ids[x] = self.add_image("Image%s" % str(x))

        # Queue the first image, verify no images still in cache after queueing
        # then run the prefetcher and verify that the image is then in the
        # cache
        path = "http://%s:%d/v1/queued_images/%s" % ("127.0.0.1",
                                                     self.api_port, ids[0])
        http = httplib2.Http()
        response, content = http.request(path, 'PUT')
        self.assertEqual(200, response.status)

        self.verify_no_cached_images()

        cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" %
               (sys.executable, cache_config_filepath))

        exitcode, out, err = execute(cmd)

        self.assertEqual(0, exitcode)
        self.assertEqual('', out.strip(), out)

        # Verify first image now in cache
        path = "http://%s:%d/v1/cached_images" % ("127.0.0.1", self.api_port)
        http = httplib2.Http()
        response, content = http.request(path, 'GET')
        self.assertEqual(200, response.status)

        data = jsonutils.loads(content)
        self.assertIn('cached_images', data)

        cached_images = data['cached_images']
        self.assertEqual(1, len(cached_images))
        self.assertIn(ids[0], [r['image_id'] for r in data['cached_images']])

        self.stop_servers()


class TestImageCacheXattr(functional.FunctionalTest,
                          BaseCacheMiddlewareTest):

    """Functional tests that exercise the image cache using the xattr driver"""

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-xattr installed and xattr support on the
        filesystem)
        """
        if getattr(self, 'disabled', False):
            return

        if not getattr(self, 'inited', False):
            try:
                import xattr  # noqa
            except ImportError:
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-xattr not installed.")
                return

        self.inited = True
        self.disabled = False
        self.image_cache_driver = "xattr"

        super(TestImageCacheXattr, self).setUp()

        self.api_server.deployment_flavor = "caching"

        if not xattr_writes_supported(self.test_dir):
            self.inited = True
            self.disabled = True
            self.disabled_message = ("filesystem does not support xattr")
            return

    def tearDown(self):
        super(TestImageCacheXattr, self).tearDown()
        if os.path.exists(self.api_server.image_cache_dir):
            shutil.rmtree(self.api_server.image_cache_dir)


class TestImageCacheManageXattr(functional.FunctionalTest,
                                BaseCacheManageMiddlewareTest):

    """
    Functional tests that exercise the image cache management
    with the Xattr cache driver
    """

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-xattr installed and xattr support on the
        filesystem)
        """
        if getattr(self, 'disabled', False):
            return

        if not getattr(self, 'inited', False):
            try:
                import xattr  # noqa
            except ImportError:
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-xattr not installed.")
                return

        self.inited = True
        self.disabled = False
        self.image_cache_driver = "xattr"

        super(TestImageCacheManageXattr, self).setUp()

        self.api_server.deployment_flavor = "cachemanagement"

        if not xattr_writes_supported(self.test_dir):
            self.inited = True
            self.disabled = True
            self.disabled_message = ("filesystem does not support xattr")
            return

    def tearDown(self):
        super(TestImageCacheManageXattr, self).tearDown()
        if os.path.exists(self.api_server.image_cache_dir):
            shutil.rmtree(self.api_server.image_cache_dir)


class TestImageCacheSqlite(functional.FunctionalTest,
                           BaseCacheMiddlewareTest):

    """
    Functional tests that exercise the image cache using the
    SQLite driver
    """

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-xattr installed and xattr support on the
        filesystem)
        """
        if getattr(self, 'disabled', False):
            return

        if not getattr(self, 'inited', False):
            try:
                import sqlite3  # noqa
            except ImportError:
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-sqlite3 not installed.")
                return

        self.inited = True
        self.disabled = False

        super(TestImageCacheSqlite, self).setUp()

        self.api_server.deployment_flavor = "caching"

    def tearDown(self):
        super(TestImageCacheSqlite, self).tearDown()
        if os.path.exists(self.api_server.image_cache_dir):
            shutil.rmtree(self.api_server.image_cache_dir)


class TestImageCacheManageSqlite(functional.FunctionalTest,
                                 BaseCacheManageMiddlewareTest):

    """
    Functional tests that exercise the image cache management using the
    SQLite driver
    """

    def setUp(self):
        """
        Test to see if the pre-requisites for the image cache
        are working (python-xattr installed and xattr support on the
        filesystem)
        """
        if getattr(self, 'disabled', False):
            return

        if not getattr(self, 'inited', False):
            try:
                import sqlite3  # noqa
            except ImportError:
                self.inited = True
                self.disabled = True
                self.disabled_message = ("python-sqlite3 not installed.")
                return

        self.inited = True
        self.disabled = False
        self.image_cache_driver = "sqlite"

        super(TestImageCacheManageSqlite, self).setUp()

        self.api_server.deployment_flavor = "cachemanagement"

    def tearDown(self):
        super(TestImageCacheManageSqlite, self).tearDown()
        if os.path.exists(self.api_server.image_cache_dir):
            shutil.rmtree(self.api_server.image_cache_dir)

# NOTE(review): tar-archive member boundary (residue, not Python source):
# glance-12.0.0/glance/tests/functional/test_client_redirects.py0000664000567000056710000001156212701407047025663 0ustar jenkinsjenkins00000000000000
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Functional test cases testing glance client redirect-following."""

import eventlet.patcher
import webob.dec
import webob.exc

from glance.common import client
from glance.common import exception
from glance.common import wsgi
from glance.tests import functional
from glance.tests import utils

# Patch sockets so the eventlet-based WSGI servers below cooperate with
# the blocking HTTP client used by the tests.
eventlet.patcher.monkey_patch(socket=True)


def RedirectTestApp(name):
    # Factory: returns an App class whose instances identify themselves by
    # `name` in their "/success" responses.
    class App(object):
        """
        Test WSGI application which can respond with multiple kinds of
        HTTP redirects and is used to verify Glance client redirects.
        """

        def __init__(self):
            """
            Initialize app with a name and port.
            """
            self.name = name

        @webob.dec.wsgify
        def __call__(self, request):
            """
            Handles all requests to the application.
            """
            base = "http://%s" % request.host
            path = request.path_qs

            if path == "/":
                return "root"

            elif path == "/302":
                url = "%s/success" % base
                raise webob.exc.HTTPFound(location=url)

            elif path == "/302?with_qs=yes":
                url = "%s/success?with_qs=yes" % base
                raise webob.exc.HTTPFound(location=url)

            elif path == "/infinite_302":
                # Redirects to itself forever; used to test the client's
                # redirect limit.
                raise webob.exc.HTTPFound(location=request.url)

            elif path.startswith("/redirect-to"):
                # "/redirect-to-<port>" redirects to another local server.
                url = "http://127.0.0.1:%s/success" % path.split("-")[-1]
                raise webob.exc.HTTPFound(location=url)

            elif path == "/success":
                return "success_from_host_%s" % self.name

            elif path == "/success?with_qs=yes":
                return "success_with_qs"

            return "fail"

    return App


class TestClientRedirects(functional.FunctionalTest):

    def setUp(self):
        # Start two redirect test servers on unused ports; the client under
        # test points at server one.
        super(TestClientRedirects, self).setUp()
        self.port_one = utils.get_unused_port()
        self.port_two = utils.get_unused_port()

        server_one = wsgi.Server()
        server_two = wsgi.Server()

        self.config(bind_host='127.0.0.1')
        self.config(workers=0)

        server_one.start(RedirectTestApp("one")(), self.port_one)
        server_two.start(RedirectTestApp("two")(), self.port_two)

        self.client = client.BaseClient("127.0.0.1", self.port_one)

    def test_get_without_redirect(self):
        """
        Test GET with no redirect
        """
        response = self.client.do_request("GET", "/")
        self.assertEqual(200, response.status)
        self.assertEqual("root", response.read())

    def test_get_with_one_redirect(self):
        """
        Test GET with one 302 FOUND redirect
        """
        response = self.client.do_request("GET", "/302")
        self.assertEqual(200, response.status)
        self.assertEqual("success_from_host_one", response.read())

    def test_get_with_one_redirect_query_string(self):
        """
        Test GET with one 302 FOUND redirect w/ a query string
        """
        response = self.client.do_request("GET", "/302",
                                          params={'with_qs': 'yes'})
        self.assertEqual(200, response.status)
        self.assertEqual("success_with_qs", response.read())

    def test_get_with_max_redirects(self):
        """
        Test we don't redirect forever.
        """
        self.assertRaises(exception.MaxRedirectsExceeded,
                          self.client.do_request,
                          "GET",
                          "/infinite_302")

    def test_post_redirect(self):
        """
        Test POST with 302 redirect
        """
        response = self.client.do_request("POST", "/302")
        self.assertEqual(200, response.status)
        self.assertEqual("success_from_host_one", response.read())

    def test_redirect_to_new_host(self):
        """
        Test redirect to one host and then another.
        """
        url = "/redirect-to-%d" % self.port_two
        response = self.client.do_request("POST", url)

        self.assertEqual(200, response.status)
        self.assertEqual("success_from_host_two", response.read())

        response = self.client.do_request("POST", "/success")
        self.assertEqual(200, response.status)
        self.assertEqual("success_from_host_one", response.read())

# NOTE(review): tar-archive member boundary (residue, not Python source):
# glance-12.0.0/glance/tests/functional/test_scrubber.py0000664000567000056710000002701012701407047024143 0ustar jenkinsjenkins00000000000000
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os import sys import time import httplib2 from oslo_serialization import jsonutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests import functional from glance.tests.utils import execute TEST_IMAGE_DATA = '*' * 5 * units.Ki TEST_IMAGE_META = { 'name': 'test_image', 'is_public': False, 'disk_format': 'raw', 'container_format': 'ovf', } class TestScrubber(functional.FunctionalTest): """Test that delayed_delete works and the scrubber deletes""" def _send_http_request(self, path, method, body=None): headers = { 'x-image-meta-name': 'test_image', 'x-image-meta-is_public': 'true', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'content-type': 'application/octet-stream' } return httplib2.Http().request(path, method, body, headers) def test_delayed_delete(self): """ test that images don't get deleted immediately and that the scrubber scrubs them """ self.cleanup() self.start_servers(delayed_delete=True, daemon=True, metadata_encryption_key='') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) response, content = self._send_http_request(path, 'POST', body='XXX') self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image['id']) response, content = self._send_http_request(path, 'DELETE') self.assertEqual(200, response.status) response, content = self._send_http_request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('pending_delete', response['x-image-meta-status']) self.wait_for_scrub(path) self.stop_servers() def test_delayed_delete_with_trustedauth_registry(self): """ test that images don't get deleted immediately and that the scrubber scrubs them when registry is operating in trustedauth mode """ self.cleanup() self.api_server.deployment_flavor = 'noauth' 
self.registry_server.deployment_flavor = 'trusted-auth' self.start_servers(delayed_delete=True, daemon=True, metadata_encryption_key='', send_identity_headers=True) base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874', 'X-Roles': 'admin', } headers = { 'x-image-meta-name': 'test_image', 'x-image-meta-is_public': 'true', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'content-type': 'application/octet-stream', } headers.update(base_headers) path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', body='XXX', headers=headers) self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) image_id = image['id'] path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE', headers=base_headers) self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD', headers=base_headers) self.assertEqual(200, response.status) self.assertEqual('pending_delete', response['x-image-meta-status']) self.wait_for_scrub(path, headers=base_headers) self.stop_servers() def test_scrubber_app(self): """ test that the glance-scrubber script runs successfully when not in daemon mode """ self.cleanup() self.start_servers(delayed_delete=True, daemon=False, metadata_encryption_key='') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) response, content = self._send_http_request(path, 'POST', body='XXX') self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image['id']) response, content = self._send_http_request(path, 
'DELETE') self.assertEqual(200, response.status) response, content = self._send_http_request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('pending_delete', response['x-image-meta-status']) # wait for the scrub time on the image to pass time.sleep(self.api_server.scrub_time) # scrub images and make sure they get deleted exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(path) self.stop_servers() def test_scrubber_app_with_trustedauth_registry(self): """ test that the glance-scrubber script runs successfully when not in daemon mode and with a registry that operates in trustedauth mode """ self.cleanup() self.api_server.deployment_flavor = 'noauth' self.registry_server.deployment_flavor = 'trusted-auth' self.start_servers(delayed_delete=True, daemon=False, metadata_encryption_key='', send_identity_headers=True) base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': 'deae8923-075d-4287-924b-840fb2644874', 'X-Roles': 'admin', } headers = { 'x-image-meta-name': 'test_image', 'x-image-meta-is_public': 'true', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'content-type': 'application/octet-stream', } headers.update(base_headers) path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', body='XXX', headers=headers) self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) image_id = image['id'] path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE', headers=base_headers) 
self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD', headers=base_headers) self.assertEqual(200, response.status) self.assertEqual('pending_delete', response['x-image-meta-status']) # wait for the scrub time on the image to pass time.sleep(self.api_server.scrub_time) # scrub images and make sure they get deleted exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(path, headers=base_headers) self.stop_servers() def test_scrubber_delete_handles_exception(self): """ Test that the scrubber handles the case where an exception occurs when _delete() is called. The scrubber should not write out queue files in this case. """ # Start servers. self.cleanup() self.start_servers(delayed_delete=True, daemon=False, default_store='file') # Check that we are using a file backend. self.assertEqual(self.api_server.default_store, 'file') # add an image path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) response, content = self._send_http_request(path, 'POST', body='XXX') self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) # delete the image path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image['id']) response, content = self._send_http_request(path, 'DELETE') self.assertEqual(200, response.status) # ensure the image is marked pending delete response, content = self._send_http_request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('pending_delete', response['x-image-meta-status']) # Remove the file from the backend. 
file_path = os.path.join(self.api_server.image_dir, image['id']) os.remove(file_path) # Wait for the scrub time on the image to pass time.sleep(self.api_server.scrub_time) # run the scrubber app, and ensure it doesn't fall over exe_cmd = "%s -m glance.cmd.scrubber" % sys.executable cmd = ("%s --config-file %s" % (exe_cmd, self.scrubber_daemon.conf_file_name)) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(0, exitcode) self.wait_for_scrub(path) self.stop_servers() def wait_for_scrub(self, path, headers=None): """ NOTE(jkoelker) The build servers sometimes take longer than 15 seconds to scrub. Give it up to 5 min, checking checking every 15 seconds. When/if it flips to deleted, bail immediately. """ http = httplib2.Http() wait_for = 300 # seconds check_every = 15 # seconds for _ in range(wait_for / check_every): time.sleep(check_every) response, content = http.request(path, 'HEAD', headers=headers) if (response['x-image-meta-status'] == 'deleted' and response['x-image-meta-deleted'] == 'True'): break else: continue else: self.fail('image was never scrubbed') glance-12.0.0/glance/tests/functional/test_wsgi.py0000664000567000056710000000350612701407047023311 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for `glance.wsgi`.""" import socket import time from oslo_config import cfg import testtools from glance.common import wsgi CONF = cfg.CONF class TestWSGIServer(testtools.TestCase): """WSGI server tests.""" def test_client_socket_timeout(self): CONF.set_default("workers", 0) CONF.set_default("client_socket_timeout", 0.1) """Verify connections are timed out as per 'client_socket_timeout'""" greetings = 'Hello, World!!!' def hello_world(env, start_response): start_response('200 OK', [('Content-Type', 'text/plain')]) return [greetings] server = wsgi.Server() server.start(hello_world, 0) port = server.sock.getsockname()[1] def get_request(delay=0.0): sock = socket.socket() sock.connect(('127.0.0.1', port)) time.sleep(delay) sock.send('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n') return sock.recv(1024) # Should succeed - no timeout self.assertIn(greetings, get_request()) # Should fail - connection timed out so we get nothing from the server self.assertFalse(get_request(delay=0.2)) glance-12.0.0/glance/tests/functional/test_sqlite.py0000664000567000056710000000250412701407047023636 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Functional test cases for sqlite-specific logic""" from glance.tests import functional from glance.tests.utils import depends_on_exe from glance.tests.utils import execute from glance.tests.utils import skip_if_disabled class TestSqlite(functional.FunctionalTest): """Functional tests for sqlite-specific logic""" @depends_on_exe('sqlite3') @skip_if_disabled def test_big_int_mapping(self): """Ensure BigInteger not mapped to BIGINT""" self.cleanup() self.start_servers(**self.__dict__.copy()) cmd = "sqlite3 tests.sqlite '.schema'" exitcode, out, err = execute(cmd, raise_error=True) self.assertNotIn('BIGINT', out) self.stop_servers() glance-12.0.0/glance/tests/functional/test_ssl.py0000664000567000056710000000540512701407047023141 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import httplib2 from glance.tests import functional TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'var')) class TestSSL(functional.FunctionalTest): """Functional tests verifying SSL communication""" def setUp(self): super(TestSSL, self).setUp() if getattr(self, 'inited', False): return self.inited = False self.disabled = True # NOTE (stevelle): Test key/cert/CA file created as per: # http://nrocco.github.io/2013/01/25/ # self-signed-ssl-certificate-chains.html # For these tests certificate.crt must be created with 'Common Name' # set to 127.0.0.1 self.key_file = os.path.join(TEST_VAR_DIR, 'privatekey.key') if not os.path.exists(self.key_file): self.disabled_message = ("Could not find private key file %s" % self.key_file) self.inited = True return self.cert_file = os.path.join(TEST_VAR_DIR, 'certificate.crt') if not os.path.exists(self.cert_file): self.disabled_message = ("Could not find certificate file %s" % self.cert_file) self.inited = True return self.ca_file = os.path.join(TEST_VAR_DIR, 'ca.crt') if not os.path.exists(self.ca_file): self.disabled_message = ("Could not find CA file %s" % self.ca_file) self.inited = True return self.inited = True self.disabled = False def tearDown(self): super(TestSSL, self).tearDown() if getattr(self, 'inited', False): return def test_ssl_ok(self): """Make sure the public API works with HTTPS.""" self.cleanup() self.start_servers(**self.__dict__.copy()) path = "https://%s:%d/versions" % ("127.0.0.1", self.api_port) https = httplib2.Http(ca_certs=self.ca_file) response, content = https.request(path, 'GET') self.assertEqual(200, response.status) glance-12.0.0/glance/tests/functional/test_glance_manage.py0000664000567000056710000000523512701407047025102 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test cases for glance-manage""" import os import sys from glance.common import utils from glance.tests import functional from glance.tests.utils import depends_on_exe from glance.tests.utils import execute from glance.tests.utils import skip_if_disabled class TestGlanceManage(functional.FunctionalTest): """Functional tests for glance-manage""" def setUp(self): super(TestGlanceManage, self).setUp() conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(conf_dir) self.conf_filepath = os.path.join(conf_dir, 'glance-manage.conf') self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite') self.connection = ('sql_connection = sqlite:///%s' % self.db_filepath) def _sync_db(self): with open(self.conf_filepath, 'wb') as conf_file: conf_file.write('[DEFAULT]\n') conf_file.write(self.connection) conf_file.flush() cmd = ('%s -m glance.cmd.manage --config-file %s db sync' % (sys.executable, self.conf_filepath)) execute(cmd, raise_error=True) def _assert_tables(self): cmd = "sqlite3 %s '.schema'" % self.db_filepath exitcode, out, err = execute(cmd, raise_error=True) self.assertIn('CREATE TABLE images', out) self.assertIn('CREATE TABLE image_tags', out) self.assertIn('CREATE TABLE image_locations', out) # NOTE(bcwaldon): For some reason we need double-quotes around # these two table names # NOTE(vsergeyev): There are some cases when we have no double-quotes self.assertTrue( 'CREATE TABLE "image_members"' in out or 'CREATE TABLE image_members' in out) self.assertTrue( 'CREATE TABLE "image_properties"' in out or 'CREATE TABLE image_properties' in out) 
@depends_on_exe('sqlite3') @skip_if_disabled def test_db_creation(self): """Test DB creation by db_sync on a fresh DB""" self._sync_db() self._assert_tables() glance-12.0.0/glance/tests/functional/test_bin_glance_cache_manage.py0000664000567000056710000002650712701407047027062 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test case that utilizes the bin/glance-cache-manage CLI tool""" import datetime import hashlib import os import sys import httplib2 from oslo_serialization import jsonutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests import functional from glance.tests.utils import execute from glance.tests.utils import minimal_headers FIVE_KB = 5 * units.Ki class TestBinGlanceCacheManage(functional.FunctionalTest): """Functional tests for the bin/glance CLI tool""" def setUp(self): self.image_cache_driver = "sqlite" super(TestBinGlanceCacheManage, self).setUp() self.api_server.deployment_flavor = "cachemanagement" # NOTE(sirp): This is needed in case we are running the tests under an # environment in which OS_AUTH_STRATEGY=keystone. The test server we # spin up won't have keystone support, so we need to switch to the # NoAuth strategy. 
os.environ['OS_AUTH_STRATEGY'] = 'noauth' os.environ['OS_AUTH_URL'] = '' def add_image(self, name): """ Adds an image with supplied name and returns the newly-created image identifier. """ image_data = "*" * FIVE_KB headers = minimal_headers(name) path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual(name, data['image']['name']) self.assertTrue(data['image']['is_public']) return data['image']['id'] def is_image_cached(self, image_id): """ Return True if supplied image ID is cached, False otherwise """ exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) return image_id in out def iso_date(self, image_id): """ Return True if supplied image ID is cached, False otherwise """ exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port) exitcode, out, err = execute(cmd) return datetime.datetime.utcnow().strftime("%Y-%m-%d") in out def test_no_cache_enabled(self): """ Test that cache index command works """ self.cleanup() self.api_server.deployment_flavor = '' self.start_servers() # Not passing in cache_manage in pipeline... 
api_port = self.api_port # Verify decent error message returned exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('Cache management middleware not enabled on host', out.strip()) self.stop_servers() def test_cache_index(self): """ Test that cache index command works """ self.cleanup() self.start_servers(**self.__dict__.copy()) api_port = self.api_port # Verify no cached images exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No cached images', out.strip()) ids = {} # Add a few images and cache the second one of them # by GETing the image... for x in range(4): ids[x] = self.add_image("Image%s" % x) path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", api_port, ids[1]) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertTrue(self.is_image_cached(ids[1]), "%s is not cached." % ids[1]) self.assertTrue(self.iso_date(ids[1])) self.stop_servers() def test_queue(self): """ Test that we can queue and fetch images using the CLI utility """ self.cleanup() self.start_servers(**self.__dict__.copy()) api_port = self.api_port # Verify no cached images exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No cached images', out.strip()) # Verify no queued images cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No queued images', out.strip()) ids = {} # Add a few images and cache the second one of them # by GETing the image... 
for x in range(4): ids[x] = self.add_image("Image%s" % x) # Queue second image and then cache it cmd = "%s --port=%d --force queue-image %s" % ( exe_cmd, api_port, ids[1]) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) # Verify queued second image cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn(ids[1], out, 'Image %s was not queued!' % ids[1]) # Cache images in the queue by running the prefetcher cache_config_filepath = os.path.join(self.test_dir, 'etc', 'glance-cache.conf') cache_file_options = { 'image_cache_dir': self.api_server.image_cache_dir, 'image_cache_driver': self.image_cache_driver, 'registry_port': self.registry_server.bind_port, 'log_file': os.path.join(self.test_dir, 'cache.log'), 'metadata_encryption_key': "012345678901234567890123456789ab", 'filesystem_store_datadir': self.test_dir } with open(cache_config_filepath, 'w') as cache_file: cache_file.write("""[DEFAULT] debug = True verbose = True image_cache_dir = %(image_cache_dir)s image_cache_driver = %(image_cache_driver)s registry_host = 127.0.0.1 registry_port = %(registry_port)s metadata_encryption_key = %(metadata_encryption_key)s log_file = %(log_file)s [glance_store] filesystem_store_datadir=%(filesystem_store_datadir)s """ % cache_file_options) cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" % (sys.executable, cache_config_filepath)) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertEqual('', out.strip(), out) # Verify no queued images cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No queued images', out.strip()) # Verify second image now cached cmd = "%s --port=%d list-cached" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn(ids[1], out, 'Image %s was not cached!' 
% ids[1]) # Queue third image and then delete it from queue cmd = "%s --port=%d --force queue-image %s" % ( exe_cmd, api_port, ids[2]) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) # Verify queued third image cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn(ids[2], out, 'Image %s was not queued!' % ids[2]) # Delete the image from the queue cmd = ("%s --port=%d --force " "delete-queued-image %s") % (exe_cmd, api_port, ids[2]) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) # Verify no queued images cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No queued images', out.strip()) # Queue all images for x in range(4): cmd = ("%s --port=%d --force " "queue-image %s") % (exe_cmd, api_port, ids[x]) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) # Verify queued third image cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('Found 3 queued images', out) # Delete the image from the queue cmd = ("%s --port=%d --force " "delete-all-queued-images") % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) # Verify nothing in queue anymore cmd = "%s --port=%d list-queued" % (exe_cmd, api_port) exitcode, out, err = execute(cmd) self.assertEqual(0, exitcode) self.assertIn('No queued images', out.strip()) # verify two image id when queue-image cmd = ("%s --port=%d --force " "queue-image %s %s") % (exe_cmd, api_port, ids[0], ids[1]) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('Please specify one and only ID of ' 'the image you wish to ', out.strip()) # verify two image id when delete-queued-image cmd = ("%s --port=%d --force delete-queued-image " "%s %s") % (exe_cmd, api_port, ids[0], ids[1]) exitcode, out, 
err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('Please specify one and only ID of ' 'the image you wish to ', out.strip()) # verify two image id when delete-cached-image cmd = ("%s --port=%d --force delete-cached-image " "%s %s") % (exe_cmd, api_port, ids[0], ids[1]) exitcode, out, err = execute(cmd, raise_error=False) self.assertEqual(1, exitcode) self.assertIn('Please specify one and only ID of ' 'the image you wish to ', out.strip()) self.stop_servers() glance-12.0.0/glance/tests/functional/db/0000775000567000056710000000000012701407204021303 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/db/test_registry.py0000664000567000056710000000677612701407047024611 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import options import glance.db import glance.tests.functional.db as db_tests from glance.tests.functional.db import base from glance.tests.functional.db import base_metadef CONF = cfg.CONF def get_db(config): options.set_defaults(CONF, connection='sqlite://') config(data_api='glance.db.registry.api') return glance.db.get_api() def reset_db(db_api): pass class FunctionalInitWrapper(base.FunctionalInitWrapper): def setUp(self): # NOTE(flaper87): We need to start the # registry service *before* TestDriver's # setup goes on, since it'll create some # images that will be later used in tests. 
# # Python's request is way too magical and # it will make the TestDriver's super call # FunctionalTest's without letting us start # the server. # # This setUp will be called by TestDriver # and will be used to call FunctionalTest # setUp method *and* start the registry # service right after it. super(FunctionalInitWrapper, self).setUp() self.registry_server.deployment_flavor = 'fakeauth' self.start_with_retry(self.registry_server, 'registry_port', 3, api_version=2) self.config(registry_port=self.registry_server.bind_port, use_user_token=True) class TestRegistryDriver(base.TestDriver, base.DriverTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestRegistryDriver, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestRegistryDriver, self).tearDown() class TestRegistryQuota(base.DriverQuotaTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestRegistryQuota, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestRegistryQuota, self).tearDown() class TestRegistryMetadefDriver(base_metadef.TestMetadefDriver, base_metadef.MetadefDriverTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestRegistryMetadefDriver, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestRegistryMetadefDriver, self).tearDown() class TestTasksDriver(base.TaskTests, FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestTasksDriver, self).setUp() self.addCleanup(db_tests.reset) def tearDown(self): self.registry_server.stop() super(TestTasksDriver, self).tearDown() glance-12.0.0/glance/tests/functional/db/test_sqlalchemy.py0000664000567000056710000001526112701407047025070 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import options from glance.common import exception import glance.db.sqlalchemy.api from glance.db.sqlalchemy import models as db_models from glance.db.sqlalchemy import models_glare as artifact_models from glance.db.sqlalchemy import models_metadef as metadef_models import glance.tests.functional.db as db_tests from glance.tests.functional.db import base from glance.tests.functional.db import base_glare from glance.tests.functional.db import base_metadef CONF = cfg.CONF def get_db(config): options.set_defaults(CONF, connection='sqlite://') config(verbose=False, debug=False) db_api = glance.db.sqlalchemy.api return db_api def reset_db(db_api): db_models.unregister_models(db_api.get_engine()) db_models.register_models(db_api.get_engine()) def reset_db_metadef(db_api): metadef_models.unregister_models(db_api.get_engine()) metadef_models.register_models(db_api.get_engine()) def reset_db_artifacts(db_api): artifact_models.unregister_models(db_api.get_engine()) artifact_models.register_models(db_api.get_engine()) class TestSqlAlchemyDriver(base.TestDriver, base.DriverTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyDriver, self).setUp() self.addCleanup(db_tests.reset) def test_get_image_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api._image_get, 
self.context, image_id) def test_image_tag_delete_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.image_tag_delete, self.context, image_id, 'fake') def test_image_tag_get_all_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.image_tag_get_all, self.context, image_id) def test_user_get_storage_usage_with_invalid_long_image_id(self): image_id = '343f9ba5-0197-41be-9543-16bbb32e12aa-xxxxxx' self.assertRaises(exception.NotFound, self.db_api.user_get_storage_usage, self.context, 'fake_owner_id', image_id) class TestSqlAlchemyVisibility(base.TestVisibility, base.VisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyMembershipVisibility(base.TestMembershipVisibility, base.MembershipVisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyMembershipVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyDBDataIntegrity(base.TestDriver, base.FunctionalInitWrapper): """Test class for checking the data integrity in the database. Helpful in testing scenarios specific to the sqlalchemy api. 
""" def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyDBDataIntegrity, self).setUp() self.addCleanup(db_tests.reset) def test_paginate_redundant_sort_keys(self): original_method = self.db_api._paginate_query def fake_paginate_query(query, model, limit, sort_keys, marker, sort_dir, sort_dirs): self.assertEqual(['created_at', 'id'], sort_keys) return original_method(query, model, limit, sort_keys, marker, sort_dir, sort_dirs) self.stubs.Set(self.db_api, '_paginate_query', fake_paginate_query) self.db_api.image_get_all(self.context, sort_key=['created_at']) def test_paginate_non_redundant_sort_keys(self): original_method = self.db_api._paginate_query def fake_paginate_query(query, model, limit, sort_keys, marker, sort_dir, sort_dirs): self.assertEqual(['name', 'created_at', 'id'], sort_keys) return original_method(query, model, limit, sort_keys, marker, sort_dir, sort_dirs) self.stubs.Set(self.db_api, '_paginate_query', fake_paginate_query) self.db_api.image_get_all(self.context, sort_key=['name']) class TestSqlAlchemyTask(base.TaskTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyTask, self).setUp() self.addCleanup(db_tests.reset) class TestSqlAlchemyQuota(base.DriverQuotaTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSqlAlchemyQuota, self).setUp() self.addCleanup(db_tests.reset) class TestDBPurge(base.DBPurgeTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestDBPurge, self).setUp() self.addCleanup(db_tests.reset) class TestArtifacts(base_glare.ArtifactsTestDriver, base_glare.ArtifactTests): def setUp(self): db_tests.load(get_db, reset_db_artifacts) super(TestArtifacts, self).setUp() self.addCleanup(db_tests.reset) class TestMetadefSqlAlchemyDriver(base_metadef.TestMetadefDriver, base_metadef.MetadefDriverTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db_metadef) 
super(TestMetadefSqlAlchemyDriver, self).setUp() self.addCleanup(db_tests.reset) glance-12.0.0/glance/tests/functional/db/__init__.py0000664000567000056710000000206512701407047023424 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(markwash): These functions are used in the base tests cases to # set up the db api implementation under test. Rather than accessing them # directly, test modules should use the load and reset functions below. get_db = None reset_db = None def load(get_db_fn, reset_db_fn): global get_db, reset_db get_db = get_db_fn reset_db = reset_db_fn def reset(): global get_db, reset_db get_db = None reset_db = None glance-12.0.0/glance/tests/functional/db/base_glare.py0000664000567000056710000011000512701407047023743 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid import six from six.moves import range from glance.common import exception as exc from glance import context import glance.glare as ga import glance.tests.functional.db as db_tests from glance.tests import utils as test_utils UUID1, UUID2 = ('80cc6551-9db4-42aa-bb58-51c48757f285', 'f89c675a-e01c-436c-a384-7d2e784fb2d9') TYPE_NAME = u'TestArtifactType' TYPE_VERSION = u'1.0.0' class ArtifactsTestDriver(test_utils.BaseTestCase): def setUp(self): super(ArtifactsTestDriver, self).setUp() context_cls = context.RequestContext self.adm_context = context_cls(is_admin=True, auth_token='user:user:admin', tenant='admin-tenant') self.context = context_cls(is_admin=False, auth_token='user:user:user', tenant='test-tenant') self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self.create_test_artifacts() def create_test_artifacts(self): dependency = {'2->1': [UUID1]} self.db_api.artifact_create(self.adm_context, get_fixture(id=UUID1, name="TestArtifact1", visibility="public"), TYPE_NAME, TYPE_VERSION) self.db_api.artifact_create(self.adm_context, get_fixture(id=UUID2, name="TestArtifact2", visibility="public", dependencies=dependency), TYPE_NAME, TYPE_VERSION) self.art1 = self.db_api.artifact_get(self.context, UUID1, TYPE_NAME, TYPE_VERSION) self.art2 = self.db_api.artifact_get(self.context, UUID2, TYPE_NAME, TYPE_VERSION) class ArtifactTests(object): def test_artifact_create(self): artifact = get_fixture() created = self.db_api.artifact_create(self.context, artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(created) self.assertEqual(artifact['name'], created['name']) self.assertEqual(artifact['type_name'], created['type_name']) self.assertEqual(artifact['type_version'], created['type_version']) def test_artifact_create_none_valued_props(self): artifact = get_fixture() artifact['properties']['lylyly'] = dict(value=None, type='int') artifact['properties']['hihihi'] = dict(value=5, type='int') created = self.db_api.artifact_create(self.context, 
artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(created) self.assertIn('hihihi', created['properties']) self.assertNotIn('lylyly', created['properties']) def test_artifact_update(self): fixture = {'name': 'UpdatedName'} updated = self.db_api.artifact_update(self.context, fixture, UUID1, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(updated) self.assertEqual('UpdatedName', updated['name']) self.assertNotEqual(updated['created_at'], updated['updated_at']) def test_artifact_create_same_version_different_users(self): tenant1 = str(uuid.uuid4()) tenant2 = str(uuid.uuid4()) ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) ctx2 = context.RequestContext(is_admin=False, tenant=tenant2) artifact1 = get_fixture(owner=tenant1) artifact2 = get_fixture(owner=tenant2) self.db_api.artifact_create(ctx1, artifact1, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone( self.db_api.artifact_create(ctx2, artifact2, TYPE_NAME, TYPE_VERSION)) def test_artifact_create_same_version_deleted(self): artifact1 = get_fixture() artifact2 = get_fixture(state='deleted') artifact3 = get_fixture(state='deleted') self.db_api.artifact_create(self.context, artifact1, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone( self.db_api.artifact_create(self.context, artifact2, TYPE_NAME, TYPE_VERSION)) self.assertIsNotNone( self.db_api.artifact_create(self.context, artifact3, TYPE_NAME, TYPE_VERSION)) def test_artifact_get(self): res = self.db_api.artifact_get(self.context, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('TestArtifact1', res['name']) self.assertEqual('TestArtifactType', res['type_name']) self.assertEqual('1.0.0', res['type_version']) self.assertEqual('10.0.3-alpha+some-date', res['version']) self.assertEqual('creating', res['state']) self.assertEqual('test-tenant', res['owner']) def test_artifact_get_owned(self): tenant1 = str(uuid.uuid4()) tenant2 = str(uuid.uuid4()) ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) ctx2 = context.RequestContext(is_admin=False, 
tenant=tenant2) artifact = get_fixture(owner=tenant1) created = self.db_api.artifact_create(ctx1, artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(self.db_api.artifact_get(ctx1, created['id'], TYPE_NAME, TYPE_VERSION)) self.assertRaises(exc.ArtifactForbidden, self.db_api.artifact_get, ctx2, created['id'], TYPE_NAME, TYPE_VERSION) def test_artifact_get_public(self): tenant1 = str(uuid.uuid4()) tenant2 = str(uuid.uuid4()) ctx1 = context.RequestContext(is_admin=False, tenant=tenant1) ctx2 = context.RequestContext(is_admin=False, tenant=tenant2) artifact = get_fixture(owner=tenant1, visibility='public') created = self.db_api.artifact_create(ctx1, artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(self.db_api.artifact_get(ctx1, created['id'], TYPE_NAME, TYPE_VERSION)) self.assertIsNotNone(self.db_api.artifact_get(ctx2, created['id'], TYPE_NAME, TYPE_VERSION)) def test_artifact_update_state(self): res = self.db_api.artifact_update(self.context, {'state': 'active'}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('active', res['state']) self.assertRaises(exc.InvalidArtifactStateTransition, self.db_api.artifact_update, self.context, {'state': 'creating'}, UUID1, TYPE_NAME, TYPE_VERSION) res = self.db_api.artifact_update(self.context, {'state': 'deactivated'}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('deactivated', res['state']) res = self.db_api.artifact_update(self.context, {'state': 'active'}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('active', res['state']) res = self.db_api.artifact_update(self.context, {'state': 'deleted'}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('deleted', res['state']) self.assertRaises(exc.InvalidArtifactStateTransition, self.db_api.artifact_update, self.context, {'state': 'active'}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertRaises(exc.InvalidArtifactStateTransition, self.db_api.artifact_update, self.context, {'state': 'deactivated'}, UUID1, TYPE_NAME, TYPE_VERSION) 
self.assertRaises(exc.InvalidArtifactStateTransition, self.db_api.artifact_update, self.context, {'state': 'creating'}, UUID1, TYPE_NAME, TYPE_VERSION) def test_artifact_update_tags(self): res = self.db_api.artifact_update(self.context, {'tags': ['gagaga', 'lalala']}, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual(set(['gagaga', 'lalala']), set(res['tags'])) def test_artifact_update_properties(self): new_properties = {'properties': { 'propname1': { 'type': 'string', 'value': 'qeqeqe'}, 'propname2': { 'type': 'int', 'value': 6}, 'propname3': { 'type': 'int', 'value': '5'}, 'proparray': { 'type': 'string', 'value': 'notarray' }} } res = self.db_api.artifact_update(self.context, new_properties, UUID1, TYPE_NAME, TYPE_VERSION) bd_properties = res['properties'] self.assertEqual(4, len(bd_properties)) for prop in bd_properties: self.assertIn(prop, new_properties['properties']) def test_artifact_update_blobs(self): new_blobs = {'blobs': { 'blob1': [{ 'size': 2600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 200000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'newURL21', 'status': 'active'}, {'value': 'URL22', 'status': 'passive'}] } ], 'blob2': [{ 'size': 120000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }, { 'size': 300000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'bl1URL2', 'status': 'passive'}] } ] } } res = self.db_api.artifact_update(self.context, new_blobs, UUID1, TYPE_NAME, TYPE_VERSION) bd_blobs = res['blobs'] self.assertEqual(2, len(bd_blobs)) for blob in bd_blobs: self.assertIn(blob, new_blobs['blobs']) def test_artifact_create_with_dependency(self): dependencies = {"new->2": [UUID2]} artifact = get_fixture(dependencies=dependencies) res = self.db_api.artifact_create(self.context, 
artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(res) created = self.db_api.artifact_get( self.context, res['id'], TYPE_NAME, TYPE_VERSION, show_level=ga.Showlevel.DIRECT) bd_dependencies = created['dependencies'] self.assertEqual(1, len(bd_dependencies)) # now try to update artifact with the same dependency new_dependencies = {"dependencies": {"new->2": [UUID2], "new->3": [UUID2]}} res = self.db_api.artifact_update(self.context, new_dependencies, UUID1, TYPE_NAME, TYPE_VERSION) retrieved = self.db_api.artifact_get( self.context, res['id'], TYPE_NAME, TYPE_VERSION, show_level=ga.Showlevel.DIRECT) self.assertEqual(2, len(retrieved["dependencies"])) def test_artifact_create_transitive_dependencies(self): dependencies = {"new->2": [UUID2]} artifact = get_fixture(dependencies=dependencies, id='new') res = self.db_api.artifact_create(self.context, artifact, TYPE_NAME, TYPE_VERSION) self.assertIsNotNone(res) created = self.db_api.artifact_get( self.context, res['id'], TYPE_NAME, TYPE_VERSION, show_level=ga.Showlevel.DIRECT) bd_dependencies = created['dependencies'] self.assertEqual(1, len(bd_dependencies)) res = self.db_api.artifact_publish( self.context, res['id'], TYPE_NAME, TYPE_VERSION ) res = self.db_api.artifact_get( self.context, res['id'], TYPE_NAME, TYPE_VERSION, show_level=ga.Showlevel.TRANSITIVE) self.assertIsNotNone(res.pop('created_at')) self.assertIsNotNone(res.pop('updated_at')) # NOTE(mfedosin): tags is a set, so we have to check it separately tags = res.pop('tags', None) self.assertIsNotNone(tags) self.assertEqual(set(['gugugu', 'lalala']), set(tags)) tags = res['dependencies']['new->2'][0].pop('tags', None) self.assertIsNotNone(tags) self.assertEqual(set(['gugugu', 'lalala']), set(tags)) tags = (res['dependencies']['new->2'][0]['dependencies']['2->1'][0]. 
pop('tags', None)) self.assertIsNotNone(tags) self.assertEqual(set(['gugugu', 'lalala']), set(tags)) expected = { 'id': 'new', 'name': u'SomeArtifact', 'description': None, 'type_name': TYPE_NAME, 'type_version': TYPE_VERSION, 'version': u'10.0.3-alpha+some-date', 'visibility': u'private', 'state': u'active', 'owner': u'test-tenant', 'published_at': None, 'deleted_at': None, 'properties': { 'propname1': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 5}, 'propname3': { 'type': 'string', 'value': 'vavava'}, 'proparray': { 'type': 'array', 'value': [ {'type': 'int', 'value': 6}, {'type': 'string', 'value': 'rerere'} ] } }, 'blobs': { 'blob1': [{ 'size': 1600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 100000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }] }, 'dependencies': { 'new->2': [ { 'id': UUID2, 'created_at': self.art2['created_at'], 'updated_at': self.art2['updated_at'], 'published_at': None, 'deleted_at': None, 'name': u'TestArtifact2', 'description': None, 'type_name': TYPE_NAME, 'type_version': TYPE_VERSION, 'version': u'10.0.3-alpha+some-date', 'visibility': 'public', 'state': u'creating', 'owner': u'test-tenant', 'properties': { 'propname1': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 5}, 'propname3': { 'type': 'string', 'value': 'vavava'}, 'proparray': { 'type': 'array', 'value': [ {'type': 'int', 'value': 6}, {'type': 'string', 'value': 'rerere'} ] } }, 'blobs': { 'blob1': [{ 'size': 1600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 100000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }] }, 
'dependencies': { '2->1': [ { 'id': UUID1, 'created_at': self.art1['created_at'], 'updated_at': self.art1['updated_at'], 'published_at': None, 'deleted_at': None, 'dependencies': {}, 'name': u'TestArtifact1', 'description': None, 'type_name': TYPE_NAME, 'type_version': TYPE_VERSION, 'version': u'10.0.3-alpha+some-date', 'visibility': 'public', 'state': u'creating', 'owner': u'test-tenant', 'properties': { 'propname1': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 5}, 'propname3': { 'type': 'string', 'value': 'vavava'}, 'proparray': { 'type': 'array', 'value': [ {'type': 'int', 'value': 6}, {'type': 'string', 'value': 'rerere'} ] } }, 'blobs': { 'blob1': [{ 'size': 1600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 100000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }] } } ] } } ] } } self.assertIsNotNone(res['published_at']) published_at = res['published_at'] expected['published_at'] = published_at for key, value in six.iteritems(expected): self.assertEqual(expected[key], res[key]) def test_artifact_get_all(self): artifact = get_fixture(name='new_artifact') self.db_api.artifact_create(self.context, artifact, TYPE_NAME, TYPE_VERSION) artifacts = self.db_api.artifact_get_all(self.context) self.assertEqual(3, len(artifacts)) def test_artifact_sort_order(self): arts = [get_fixture(version='1.2.3-alpha.4.df.00f'), get_fixture(version='1.2.2'), get_fixture(version='1.2.3+some-metadata'), get_fixture(version='1.2.4'), get_fixture(version='1.2.3-release.2'), get_fixture(version='1.2.3-release.1+metadata'), get_fixture(version='1.2.3-final'), get_fixture(version='1.2.3-alpha.14.df.00f')] for art in arts: self.db_api.artifact_create(self.context, art, TYPE_NAME, TYPE_VERSION) artifacts = self.db_api.artifact_get_all(self.context, 
sort_keys=[('version', None)], sort_dirs=['asc']) expected_versions = [ '1.2.2', '1.2.3-alpha.4.df.00f', '1.2.3-alpha.14.df.00f', '1.2.3-final', '1.2.3-release.1+metadata', '1.2.3-release.2', '1.2.3+some-metadata', '1.2.4'] for i in range(len(expected_versions)): self.assertEqual(expected_versions[i], artifacts[i]['version']) def test_artifact_get_all_show_level(self): artifacts = self.db_api.artifact_get_all(self.context) self.assertEqual(2, len(artifacts)) self.assertRaises(KeyError, lambda: artifacts[0]['properties']) artifacts = self.db_api.artifact_get_all( self.context, show_level=ga.Showlevel.BASIC) self.assertEqual(2, len(artifacts)) self.assertEqual(4, len(artifacts[0]['properties'])) self.assertRaises(exc.ArtifactUnsupportedShowLevel, self.db_api.artifact_get_all, self.context, show_level=ga.Showlevel.DIRECT) def test_artifact_get_all_tags(self): artifact = get_fixture(name='new_artifact', tags=['qwerty', 'uiop']) self.db_api.artifact_create(self.context, artifact, TYPE_NAME, TYPE_VERSION) artifacts = self.db_api.artifact_get_all(self.context) self.assertEqual(3, len(artifacts)) filters = {'tags': [{ 'value': 'notag', }]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(0, len(artifacts)) filters = {'tags': [{ 'value': 'lalala', }]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(2, len(artifacts)) for artifact in artifacts: self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) def test_artifact_get_all_properties(self): artifact = get_fixture( name='new_artifact', properties={ 'newprop2': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 3}, 'propname3': { 'type': 'string', 'value': 'vavava'}, 'proptext': { 'type': 'text', 'value': 'bebebe' * 100}, 'proparray': { 'type': 'array', 'value': [ {'type': 'int', 'value': 17}, {'type': 'string', 'value': 'rerere'} ] }}) self.db_api.artifact_create(self.context, artifact, TYPE_NAME, 
TYPE_VERSION) filters = {'propname2': [{ 'value': 4, 'operator': 'GT', 'type': 'int'}]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(2, len(artifacts)) for artifact in artifacts: self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) # position hasn't been set filters = {'proparray': [{ 'value': 6, 'operator': 'LE', 'type': 'int'}]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(0, len(artifacts)) for artifact in artifacts: self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) # position has been set filters = {'proparray': [{ 'value': 6, 'position': 0, 'operator': 'LE', 'type': 'int'}]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(2, len(artifacts)) for artifact in artifacts: self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) filters = {'proparray': [{ 'value': 6, 'operator': 'IN', 'type': 'int'}]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters) self.assertEqual(2, len(artifacts)) for artifact in artifacts: self.assertIn(artifact['name'], ['TestArtifact1', 'TestArtifact2']) filters = {'name': [{'value': 'new_artifact'}]} artifacts = self.db_api.artifact_get_all(self.context, filters=filters, show_level=ga.Showlevel.BASIC) self.assertEqual(1, len(artifacts)) artifact = artifacts[0] self.assertEqual('new_artifact', artifact['name']) for prop in artifact['properties'].keys(): self.assertNotEqual('proptext', prop) filters = {'propname2': [{ 'value': 4, 'operator': 'FOO', 'type': 'int'}]} self.assertRaises( exc.ArtifactUnsupportedPropertyOperator, self.db_api.artifact_get_all, self.context, filters=filters) def test_artifact_delete(self): res = self.db_api.artifact_delete(self.context, UUID1, TYPE_NAME, TYPE_VERSION) self.assertEqual('TestArtifact1', res['name']) self.assertEqual('deleted', res['state']) self.assertIsNotNone(res['deleted_at']) artifacts = 
self.db_api.artifact_get_all(self.context) self.assertEqual(1, len(artifacts)) def test_artifact_delete_property(self): new_properties = {'properties': { 'proparray': {'value': [], 'type': 'array'} } } res = self.db_api.artifact_update(self.context, new_properties, UUID1, TYPE_NAME, TYPE_VERSION) bd_properties = res['properties'] self.assertEqual(3, len(bd_properties)) expected = { 'propname1': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 5}, 'propname3': { 'type': 'string', 'value': 'vavava'} } for prop in bd_properties: self.assertIn(prop, expected) def test_artifact_delete_blob(self): new_blobs = {'blobs': { 'blob2': [{ 'size': 2600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 200000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'newURL21', 'status': 'active'}, {'value': 'URL22', 'status': 'passive'}] } ], 'blob3': [{ 'size': 120000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }, { 'size': 300000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'bl1URL2', 'status': 'passive'}] } ] } } expected = {'blobs': { 'blob1': [{ 'size': 1600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 100000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] } ], 'blob2': [{ 'size': 2600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 200000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'newURL21', 'status': 'active'}, {'value': 'URL22', 'status': 'passive'}] } ], 'blob3': [{ 
'size': 120000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] }, { 'size': 300000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'bl1URL2', 'status': 'passive'}] } ] } } res = self.db_api.artifact_update(self.context, new_blobs, UUID1, TYPE_NAME, TYPE_VERSION) bd_blobs = res['blobs'] self.assertEqual(3, len(bd_blobs)) for blob in bd_blobs: self.assertIn(blob, expected['blobs']) del_blobs = {'blobs': { 'blob1': []} } res = self.db_api.artifact_update(self.context, del_blobs, UUID1, TYPE_NAME, TYPE_VERSION) bd_blobs = res['blobs'] self.assertEqual(2, len(bd_blobs)) for blob in bd_blobs: self.assertIn(blob, new_blobs['blobs']) def get_fixture(**kwargs): artifact = { 'name': u'SomeArtifact', 'type_name': TYPE_NAME, 'type_version': TYPE_VERSION, 'version': u'10.0.3-alpha+some-date', 'visibility': u'private', 'state': u'creating', 'owner': u'test-tenant', 'tags': ['lalala', 'gugugu'], 'properties': { 'propname1': { 'type': 'string', 'value': 'tututu'}, 'propname2': { 'type': 'int', 'value': 5}, 'propname3': { 'type': 'string', 'value': 'vavava'}, 'proparray': { 'type': 'array', 'value': [ {'type': 'int', 'value': 6}, {'type': 'string', 'value': 'rerere'} ] } }, 'blobs': { 'blob1': [{ 'size': 1600000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL11', 'status': 'active'}, {'value': 'URL12', 'status': 'active'}] }, { 'size': 100000, 'checksum': 'abc', 'item_key': 'some', 'locations': [ {'value': 'URL21', 'status': 'active'}, {'value': 'URL22', 'status': 'active'}] } ] } } artifact.update(kwargs) return artifact glance-12.0.0/glance/tests/functional/db/base.py0000664000567000056710000030271412701407047022603 0ustar jenkinsjenkins00000000000000# Copyright 2010-2012 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # Copyright 2013 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import uuid import mock # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from six.moves import reduce from glance.common import exception from glance.common import timeutils from glance import context from glance.tests import functional import glance.tests.functional.db as db_tests from glance.tests import utils as test_utils # The default sort order of results is whatever sort key is specified, # plus created_at and id for ties. When we're not specifying a sort_key, # we get the default (created_at). 
Some tests below expect the fixtures to be # returned in array-order, so if the created_at timestamps are the same, # these tests rely on the UUID* values being in order UUID1, UUID2, UUID3 = sorted([str(uuid.uuid4()) for x in range(3)]) def build_image_fixture(**kwargs): default_datetime = timeutils.utcnow() image = { 'id': str(uuid.uuid4()), 'name': 'fake image #2', 'status': 'active', 'disk_format': 'vhd', 'container_format': 'ovf', 'is_public': True, 'created_at': default_datetime, 'updated_at': default_datetime, 'deleted_at': None, 'deleted': False, 'checksum': None, 'min_disk': 5, 'min_ram': 256, 'size': 19, 'locations': [{'url': "file:///tmp/glance-tests/2", 'metadata': {}, 'status': 'active'}], 'properties': {}, } image.update(kwargs) return image def build_task_fixture(**kwargs): default_datetime = timeutils.utcnow() task = { 'id': str(uuid.uuid4()), 'type': 'import', 'status': 'pending', 'input': {'ping': 'pong'}, 'owner': str(uuid.uuid4()), 'message': None, 'expires_at': None, 'created_at': default_datetime, 'updated_at': default_datetime, } task.update(kwargs) return task class FunctionalInitWrapper(functional.FunctionalTest): def setUp(self): super(FunctionalInitWrapper, self).setUp() self.config(policy_file=self.policy_file, group='oslo_policy') class TestDriver(test_utils.BaseTestCase): def setUp(self): super(TestDriver, self).setUp() context_cls = context.RequestContext self.adm_context = context_cls(is_admin=True, auth_token='user:user:admin') self.context = context_cls(is_admin=False, auth_token='user:user:user') self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self.fixtures = self.build_image_fixtures() self.create_images(self.fixtures) def build_image_fixtures(self): dt1 = timeutils.utcnow() dt2 = dt1 + datetime.timedelta(microseconds=5) fixtures = [ { 'id': UUID1, 'created_at': dt1, 'updated_at': dt1, 'properties': {'foo': 'bar', 'far': 'boo'}, 'size': 13, }, { 'id': UUID2, 'created_at': dt1, 'updated_at': dt2, 'size': 
                17,
            },
            {
                'id': UUID3,
                'created_at': dt2,
                'updated_at': dt2,
            },
        ]
        return [build_image_fixture(**fixture) for fixture in fixtures]

    def create_images(self, images):
        # Insert every fixture through the admin context so the images exist
        # regardless of which per-test context is used later.
        for fixture in images:
            self.db_api.image_create(self.adm_context, fixture)


class DriverTests(object):
    # Driver-independent conformance tests for the image DB API.
    # NOTE(review): assumes the concrete test class provides self.db_api,
    # self.context, self.adm_context and self.fixtures — confirm in the
    # enclosing test base class (outside this chunk).

    def test_image_create_requires_status(self):
        # 'status' is mandatory: creating without it must raise Invalid.
        fixture = {'name': 'mark', 'size': 12}
        self.assertRaises(exception.Invalid,
                          self.db_api.image_create, self.context, fixture)
        fixture = {'name': 'mark', 'size': 12, 'status': 'queued'}
        self.db_api.image_create(self.context, fixture)

    @mock.patch.object(timeutils, 'utcnow')
    def test_image_create_defaults(self, mock_utcnow):
        # Pin utcnow so created_at/updated_at can be compared exactly.
        mock_utcnow.return_value = datetime.datetime.utcnow()
        create_time = timeutils.utcnow()
        values = {'status': 'queued',
                  'created_at': create_time,
                  'updated_at': create_time}
        image = self.db_api.image_create(self.context, values)

        # Every optional core attribute must come back with its default.
        self.assertIsNone(image['name'])
        self.assertIsNone(image['container_format'])
        self.assertEqual(0, image['min_ram'])
        self.assertEqual(0, image['min_disk'])
        self.assertIsNone(image['owner'])
        self.assertFalse(image['is_public'])
        self.assertIsNone(image['size'])
        self.assertIsNone(image['checksum'])
        self.assertIsNone(image['disk_format'])
        self.assertEqual([], image['locations'])
        self.assertFalse(image['protected'])
        self.assertFalse(image['deleted'])
        self.assertIsNone(image['deleted_at'])
        self.assertEqual([], image['properties'])
        self.assertEqual(create_time, image['created_at'])
        self.assertEqual(create_time, image['updated_at'])

        # Image IDs aren't predictable, but they should be populated
        self.assertTrue(uuid.UUID(image['id']))

        # NOTE(bcwaldon): the tags attribute should not be returned as a part
        # of a core image entity
        self.assertNotIn('tags', image)

    def test_image_create_duplicate_id(self):
        # UUID1 already exists in the fixtures, so reusing it is a Duplicate.
        self.assertRaises(exception.Duplicate,
                          self.db_api.image_create,
                          self.context, {'id': UUID1, 'status': 'queued'})

    def test_image_create_with_locations(self):
        # Locations passed at create time must round-trip unchanged.
        locations = [{'url': 'a', 'metadata': {}, 'status': 'active'},
                     {'url': 'b', 'metadata': {}, 'status': 'active'}]
        fixture = {'status': 'queued', 'locations': locations}
        image = self.db_api.image_create(self.context, fixture)
        actual = [{'url': l['url'], 'metadata': l['metadata'],
                   'status': l['status']}
                  for l in image['locations']]
        self.assertEqual(locations, actual)

    def test_image_create_without_locations(self):
        # An explicitly empty locations list must be accepted.
        locations = []
        fixture = {'status': 'queued', 'locations': locations}
        self.db_api.image_create(self.context, fixture)

    def test_image_create_with_location_data(self):
        # Location metadata dicts must round-trip as well.
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': 'b', 'metadata': {}, 'status': 'active'}]
        fixture = {'status': 'queued', 'locations': location_data}
        image = self.db_api.image_create(self.context, fixture)
        actual = [{'url': l['url'], 'metadata': l['metadata'],
                   'status': l['status']}
                  for l in image['locations']]
        self.assertEqual(location_data, actual)

    def test_image_create_properties(self):
        # Custom properties are returned as name/value records.
        fixture = {'status': 'queued', 'properties': {'ping': 'pong'}}
        image = self.db_api.image_create(self.context, fixture)
        expected = [{'name': 'ping', 'value': 'pong'}]
        actual = [{'name': p['name'], 'value': p['value']}
                  for p in image['properties']]
        self.assertEqual(expected, actual)

    def test_image_create_unknown_attributes(self):
        # Unknown top-level attributes (not in the core schema) are rejected.
        fixture = {'ping': 'pong'}
        self.assertRaises(exception.Invalid,
                          self.db_api.image_create, self.context, fixture)

    def test_image_create_bad_name(self):
        # 4-byte unicode (astral plane) characters in a name are rejected.
        bad_name = u'A name with forbidden symbol \U0001f62a'
        fixture = {'name': bad_name, 'size': 12, 'status': 'queued'}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)

    def test_image_create_bad_checksum(self):
        # checksum should be no longer than 32 characters
        bad_checksum = "42" * 42
        fixture = {'checksum': bad_checksum}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)
        # if checksum is not longer than 32 characters but non-ascii ->
        # still raise 400
        fixture = {'checksum': u'\u042f' * 32}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)

    def test_image_create_bad_int_params(self):
        # Values past the signed 32-bit range are rejected for int columns.
        int_too_long = 2 ** 31 + 42
        for param in ['min_disk', 'min_ram']:
            fixture = {param: int_too_long}
            self.assertRaises(exception.Invalid,
                              self.db_api.image_create,
                              self.context, fixture)

    def test_image_create_bad_property(self):
        # bad value
        fixture = {'status': 'queued',
                   'properties': {'bad': u'Bad \U0001f62a'}}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)
        # bad property names are also not allowed
        fixture = {'status': 'queued', 'properties': {u'Bad \U0001f62a': 'ok'}}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)

    def test_image_create_bad_location(self):
        # 4-byte unicode in a location URL is rejected.
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': u'Bad \U0001f60a', 'metadata': {},
                          'status': 'active'}]
        fixture = {'status': 'queued', 'locations': location_data}
        self.assertRaises(exception.Invalid, self.db_api.image_create,
                          self.context, fixture)

    def test_image_update_core_attribute(self):
        # Updating a core attribute must also bump updated_at.
        fixture = {'status': 'queued'}
        image = self.db_api.image_update(self.adm_context, UUID3, fixture)
        self.assertEqual('queued', image['status'])
        self.assertNotEqual(image['created_at'], image['updated_at'])

    def test_image_update_with_locations(self):
        # Locations set via update gain server-generated 'id' keys.
        locations = [{'url': 'a', 'metadata': {}, 'status': 'active'},
                     {'url': 'b', 'metadata': {}, 'status': 'active'}]
        fixture = {'locations': locations}
        image = self.db_api.image_update(self.adm_context, UUID3, fixture)
        self.assertEqual(2, len(image['locations']))
        self.assertIn('id', image['locations'][0])
        self.assertIn('id', image['locations'][1])
        image['locations'][0].pop('id')
        image['locations'][1].pop('id')
        self.assertEqual(locations, image['locations'])

    def test_image_update_with_location_data(self):
        # Same as above but with non-empty location metadata.
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': 'b', 'metadata': {}, 'status': 'active'}]
        fixture = {'locations': location_data}
        image = self.db_api.image_update(self.adm_context, UUID3, fixture)
        self.assertEqual(2, len(image['locations']))
        self.assertIn('id', image['locations'][0])
        self.assertIn('id', image['locations'][1])
        image['locations'][0].pop('id')
        image['locations'][1].pop('id')
        self.assertEqual(location_data, image['locations'])

    def test_image_update(self):
        # Core attributes and properties can be updated in one call.
        fixture = {'status': 'queued', 'properties': {'ping': 'pong'}}
        image = self.db_api.image_update(self.adm_context, UUID3, fixture)
        expected = [{'name': 'ping', 'value': 'pong'}]
        actual = [{'name': p['name'], 'value': p['value']}
                  for p in image['properties']]
        self.assertEqual(expected, actual)
        self.assertEqual('queued', image['status'])
        self.assertNotEqual(image['created_at'], image['updated_at'])

    def test_image_update_properties(self):
        # Without purge_props, existing properties ('foo', 'far' from the
        # fixtures) are preserved and new ones merged in.
        fixture = {'properties': {'ping': 'pong'}}
        image = self.db_api.image_update(self.context, UUID1, fixture)
        expected = {'ping': 'pong', 'foo': 'bar', 'far': 'boo'}
        actual = {p['name']: p['value'] for p in image['properties']}
        self.assertEqual(expected, actual)
        self.assertNotEqual(image['created_at'], image['updated_at'])

    def test_image_update_purge_properties(self):
        # purge_props=True soft-deletes properties not present in the update.
        fixture = {'properties': {'ping': 'pong'}}
        image = self.db_api.image_update(self.context, UUID1,
                                         fixture, purge_props=True)
        properties = {p['name']: p for p in image['properties']}

        # New properties are set
        self.assertIn('ping', properties)
        self.assertEqual('pong', properties['ping']['value'])
        self.assertFalse(properties['ping']['deleted'])

        # Original properties still show up, but with deleted=True
        # TODO(markwash): db api should not return deleted properties
        self.assertIn('foo', properties)
        self.assertEqual('bar', properties['foo']['value'])
        self.assertTrue(properties['foo']['deleted'])

    def test_image_update_bad_name(self):
        # 4-byte unicode in a name is rejected on update too.
        fixture = {'name': u'A new name with forbidden symbol \U0001f62a'}
        self.assertRaises(exception.Invalid, self.db_api.image_update,
                          self.adm_context, UUID1, fixture)

    def test_image_update_bad_property(self):
        # bad value
        fixture = {'status': 'queued',
                   'properties': {'bad': u'Bad \U0001f62a'}}
        self.assertRaises(exception.Invalid, self.db_api.image_update,
                          self.adm_context, UUID1, fixture)
        # bad property names are also not allowed
        fixture = {'status': 'queued', 'properties': {u'Bad \U0001f62a': 'ok'}}
        self.assertRaises(exception.Invalid, self.db_api.image_update,
                          self.adm_context, UUID1, fixture)

    def test_image_update_bad_location(self):
        # 4-byte unicode in a location URL is rejected on update too.
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': u'Bad \U0001f60a', 'metadata': {},
                          'status': 'active'}]
        fixture = {'status': 'queued', 'locations': location_data}
        self.assertRaises(exception.Invalid, self.db_api.image_update,
                          self.adm_context, UUID1, fixture)

    def test_update_locations_direct(self):
        """
        For some reasons update_locations can be called directly
        (not via image_update), so better check that everything is ok if
        passed 4 byte unicode characters
        """
        # update locations correctly first to retrieve existing location id
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'}]
        fixture = {'locations': location_data}
        image = self.db_api.image_update(self.adm_context, UUID1, fixture)
        self.assertEqual(1, len(image['locations']))
        self.assertIn('id', image['locations'][0])
        loc_id = image['locations'][0].pop('id')
        bad_location = {'url': u'Bad \U0001f60a', 'metadata': {},
                        'status': 'active', 'id': loc_id}
        self.assertRaises(exception.Invalid,
                          self.db_api.image_location_update,
                          self.adm_context, UUID1, bad_location)

    def test_image_property_delete(self):
        # Property delete is a soft delete: record kept, flags set.
        fixture = {'name': 'ping', 'value': 'pong', 'image_id': UUID1}
        prop = self.db_api.image_property_create(self.context, fixture)
        prop = self.db_api.image_property_delete(self.context,
                                                 prop['name'], UUID1)
        self.assertIsNotNone(prop['deleted_at'])
        self.assertTrue(prop['deleted'])

    def test_image_get(self):
        image = self.db_api.image_get(self.context, UUID1)
        self.assertEqual(self.fixtures[0]['id'],
                         image['id'])

    def test_image_get_disallow_deleted(self):
        # A plain context must not see a destroyed image.
        self.db_api.image_destroy(self.adm_context, UUID1)
        self.assertRaises(exception.NotFound, self.db_api.image_get,
                          self.context, UUID1)

    def test_image_get_allow_deleted(self):
        # An admin context can still fetch a destroyed image.
        self.db_api.image_destroy(self.adm_context, UUID1)
        image = self.db_api.image_get(self.adm_context, UUID1)
        self.assertEqual(self.fixtures[0]['id'], image['id'])
        self.assertTrue(image['deleted'])

    def test_image_get_force_allow_deleted(self):
        # force_show_deleted lets a non-admin fetch a destroyed image.
        self.db_api.image_destroy(self.adm_context, UUID1)
        image = self.db_api.image_get(self.context, UUID1,
                                      force_show_deleted=True)
        self.assertEqual(self.fixtures[0]['id'], image['id'])

    def test_image_get_not_owned(self):
        # A private image is Forbidden to a different tenant.
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        image = self.db_api.image_create(
            ctxt1, {'status': 'queued', 'owner': TENANT1})
        self.assertRaises(exception.Forbidden,
                          self.db_api.image_get, ctxt2, image['id'])

    def test_image_get_not_found(self):
        UUID = str(uuid.uuid4())
        self.assertRaises(exception.NotFound,
                          self.db_api.image_get, self.context, UUID)

    def test_image_get_all(self):
        # All three fixture images are visible without filters.
        images = self.db_api.image_get_all(self.context)
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'id': self.fixtures[0]['id'],
                                           })
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_with_filter_user_defined_property(self):
        # Fixture 0 carries the 'foo': 'bar' custom property.
        images = self.db_api.image_get_all(self.context,
                                           filters={'foo': 'bar'})
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_with_filter_nonexistent_userdef_property(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'faz': 'boo'})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_userdef_prop_nonexistent_value(self):
        # Property name exists but with a different value: no match.
        images = self.db_api.image_get_all(self.context,
                                           filters={'foo': 'baz'})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_multiple_user_defined_properties(self):
        # Multiple property filters combine conjunctively (AND).
        images = self.db_api.image_get_all(self.context,
                                           filters={'foo': 'bar',
                                                    'far': 'boo'})
        self.assertEqual(1, len(images))
        self.assertEqual(images[0]['id'], self.fixtures[0]['id'])

    def test_image_get_all_with_filter_nonexistent_user_defined_property(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'foo': 'bar',
                                                    'faz': 'boo'})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_user_deleted_property(self):
        # A soft-deleted property must stop matching the filter.
        fixture = {'name': 'poo', 'value': 'bear', 'image_id': UUID1}
        prop = self.db_api.image_property_create(self.context,
                                                 fixture)
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'properties': {'poo': 'bear'},
                                           })
        self.assertEqual(1, len(images))
        self.db_api.image_property_delete(self.context,
                                          prop['name'], images[0]['id'])
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'properties': {'poo': 'bear'},
                                           })
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_undefined_property(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'poo': 'bear'})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_comparative_created_at(self):
        # 'lt:<earliest created_at>' matches nothing.
        anchor = timeutils.isotime(self.fixtures[0]['created_at'])
        time_expr = 'lt:' + anchor
        images = self.db_api.image_get_all(self.context,
                                           filters={'created_at': time_expr})
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_comparative_updated_at(self):
        anchor = timeutils.isotime(self.fixtures[0]['updated_at'])
        time_expr = 'lt:' + anchor
        images = self.db_api.image_get_all(self.context,
                                           filters={'updated_at': time_expr})
        self.assertEqual(0, len(images))

    def test_filter_image_by_invalid_operator(self):
        # Unknown comparison operators must be rejected.
        self.assertRaises(exception.InvalidFilterOperatorValue,
                          self.db_api.image_get_all,
                          self.context, filters={'status': 'lala:active'})

    def test_image_get_all_with_filter_in_status(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'status': 'in:active'})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter_in_name(self):
        # All fixtures share the same name, so a single-name 'in:' matches 3.
        data = 'in:%s' % self.fixtures[0]['name']
        images = self.db_api.image_get_all(self.context,
                                           filters={'name': data})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter_in_container_format(self):
        images = self.db_api.image_get_all(
            self.context, filters={'container_format': 'in:ami,bare,ovf'})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter_in_disk_format(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'disk_format': 'in:vhd'})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_filter_in_id(self):
        data = 'in:%s,%s' % (UUID1, UUID2)
        images = self.db_api.image_get_all(self.context,
                                           filters={'id': data})
        self.assertEqual(2, len(images))

    def test_image_get_all_with_quotes(self):
        # Quoted values inside an 'in:' expression may contain escaped
        # quotes and commas; all three renamed images must match.
        fixture = {'name': 'fake\\\"name'}
        self.db_api.image_update(self.adm_context, UUID3, fixture)
        fixture = {'name': 'fake,name'}
        self.db_api.image_update(self.adm_context, UUID2, fixture)
        fixture = {'name': 'fakename'}
        self.db_api.image_update(self.adm_context, UUID1, fixture)
        data = 'in:\"fake\\\"name\",fakename,\"fake,name\"'
        images = self.db_api.image_get_all(self.context,
                                           filters={'name': data})
        self.assertEqual(3, len(images))

    def test_image_get_all_with_invalid_quotes(self):
        # Malformed quoting in an 'in:' expression is rejected.
        invalid_expr = ['in:\"name', 'in:\"name\"name',
                        'in:name\"dd\"', 'in:na\"me',
                        'in:\"name\"\"name\"']
        for expr in invalid_expr:
            self.assertRaises(exception.InvalidParameterValue,
                              self.db_api.image_get_all,
                              self.context, filters={'name': expr})

    def test_image_get_all_size_min_max(self):
        # Fixture sizes visible in this suite are 13, 17 and one more;
        # only fixture 0 falls in [10, 15].
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'size_min': 10,
                                               'size_max': 15,
                                           })
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_size_min(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_min': 15})
        self.assertEqual(2, len(images))
        self.assertEqual(self.fixtures[2]['id'], images[0]['id'])
        self.assertEqual(self.fixtures[1]['id'], images[1]['id'])

    def test_image_get_all_size_range(self):
        # Inverted range (max < min) matches nothing.
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_max': 15,
                                                    'size_min': 20})
        self.assertEqual(0, len(images))

    def test_image_get_all_size_max(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'size_max': 15})
        self.assertEqual(1, len(images))
        self.assertEqual(self.fixtures[0]['id'], images[0]['id'])

    def test_image_get_all_with_filter_min_range_bad_value(self):
        self.assertRaises(exception.InvalidFilterRangeValue,
                          self.db_api.image_get_all,
                          self.context, filters={'size_min': 'blah'})

    def test_image_get_all_with_filter_max_range_bad_value(self):
        self.assertRaises(exception.InvalidFilterRangeValue,
                          self.db_api.image_get_all,
                          self.context, filters={'size_max': 'blah'})

    def test_image_get_all_marker(self):
        # Marker excludes itself; two fixtures remain after UUID3.
        images = self.db_api.image_get_all(self.context, marker=UUID3)
        self.assertEqual(2, len(images))

    def test_image_get_all_marker_with_size(self):
        # Use sort_key=size to test BigInteger
        images = self.db_api.image_get_all(self.context,
                                           sort_key=['size'],
                                           marker=UUID3)
        self.assertEqual(2, len(images))
        self.assertEqual(17, images[0]['size'])
        self.assertEqual(13, images[1]['size'])

    def test_image_get_all_marker_deleted(self):
        """Cannot specify a deleted image as a marker."""
        self.db_api.image_destroy(self.adm_context, UUID1)
        filters = {'deleted': False}
        self.assertRaises(exception.NotFound,
                          self.db_api.image_get_all,
                          self.context, marker=UUID1, filters=filters)

    def test_image_get_all_marker_deleted_showing_deleted_as_admin(self):
        """Specify a deleted image as a marker if showing deleted images."""
        self.db_api.image_destroy(self.adm_context, UUID3)
        images = self.db_api.image_get_all(self.adm_context, marker=UUID3)
        # NOTE(bcwaldon): an admin should see all images (deleted or not)
        self.assertEqual(2,
                         len(images))

    def test_image_get_all_marker_deleted_showing_deleted(self):
        """Specify a deleted image as a marker if showing deleted images.

        A non-admin user has to explicitly ask for deleted
        images, and should only see deleted images in the results
        """
        self.db_api.image_destroy(self.adm_context, UUID3)
        self.db_api.image_destroy(self.adm_context, UUID1)
        filters = {'deleted': True}
        images = self.db_api.image_get_all(self.context, marker=UUID3,
                                           filters=filters)
        self.assertEqual(1, len(images))

    def test_image_get_all_marker_null_name_desc(self):
        """Check an image with name null is handled

        Check an image with name null is handled
        marker is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'name': None,
                                         'owner': TENANT1})

        # NULL sorts last descending, so nothing follows the marker.
        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['name'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_disk_format_desc(self):
        """Check an image with disk_format null is handled

        Check an image with disk_format null is handled when
        marker is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'disk_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['disk_format'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_container_format_desc(self):
        """Check an image with container_format null is handled

        Check an image with container_format null is handled when
        marker is specified and order is descending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'container_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['container_format'],
                                           sort_dir=['desc'])
        image_ids = [image['id'] for image in images]
        expected = []
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_name_asc(self):
        """Check an image with name null is handled

        Check an image with name null is handled when
        marker is specified and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'name': None,
                                         'owner': TENANT1})

        # Ascending: the three named fixture images follow the null marker.
        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['name'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_disk_format_asc(self):
        """Check an image with disk_format null is handled

        Check an image with disk_format null is handled when
        marker is specified and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'disk_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['disk_format'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_marker_null_container_format_asc(self):
        """Check an image with container_format null is handled

        Check an image with container_format null is handled when
        marker is specified and order is ascending
        """
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'container_format': None,
                                         'owner': TENANT1})

        images = self.db_api.image_get_all(ctxt1, marker=UUIDX,
                                           sort_key=['container_format'],
                                           sort_dir=['asc'])
        image_ids = [image['id'] for image in images]
        expected = [UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_limit(self):
        images = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual(2, len(images))

        # A limit of None should not equate to zero
        images = self.db_api.image_get_all(self.context, limit=None)
        self.assertEqual(3, len(images))

        # A limit of zero should actually mean zero
        images = self.db_api.image_get_all(self.context, limit=0)
        self.assertEqual(0, len(images))

    def test_image_get_all_owned(self):
        # A tenant sees its own image plus the public fixtures, but not
        # another tenant's image.
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False,
                                       tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        image_meta_data = {'id': UUIDX, 'status': 'queued', 'owner': TENANT1}
        self.db_api.image_create(ctxt1, image_meta_data)

        TENANT2 = str(uuid.uuid4())
        ctxt2 = context.RequestContext(is_admin=False,
                                       tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        UUIDY = str(uuid.uuid4())
        image_meta_data = {'id': UUIDY, 'status': 'queued', 'owner': TENANT2}
        self.db_api.image_create(ctxt2, image_meta_data)

        images = self.db_api.image_get_all(ctxt1)
        image_ids = [image['id'] for image in images]
        expected = [UUIDX, UUID3, UUID2, UUID1]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_owned_checksum(self):
        # Checksum filter combined with shared visibility only matches the
        # calling tenant's membership.
        TENANT1 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False,
                                       tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1)
        UUIDX = str(uuid.uuid4())
        CHECKSUM1 = '91264c3edf5972c9f1cb309543d38a5c'
        image_meta_data = {
            'id': UUIDX,
            'status': 'queued',
            'checksum': CHECKSUM1,
            'owner': TENANT1
        }
        self.db_api.image_create(ctxt1, image_meta_data)
        image_member_data = {
            'image_id': UUIDX,
            'member': TENANT1,
            'can_share': False,
            "status": "accepted",
        }
        self.db_api.image_member_create(ctxt1, image_member_data)

        TENANT2 = str(uuid.uuid4())
        ctxt2 = context.RequestContext(is_admin=False,
                                       tenant=TENANT2,
                                       auth_token='user:%s:user' % TENANT2)
        UUIDY = str(uuid.uuid4())
        CHECKSUM2 = '92264c3edf5972c9f1cb309543d38a5c'
        image_meta_data = {
            'id': UUIDY,
            'status': 'queued',
            'checksum': CHECKSUM2,
            'owner': TENANT2
        }
        self.db_api.image_create(ctxt2, image_meta_data)
        image_member_data = {
            'image_id': UUIDY,
            'member': TENANT2,
            'can_share': False,
            "status": "accepted",
        }
        self.db_api.image_member_create(ctxt2, image_member_data)

        filters = {'visibility': 'shared', 'checksum': CHECKSUM2}
        # NOTE(review): 'filters' is passed positionally here while the rest
        # of the suite uses the filters= keyword — presumably the second
        # positional parameter of image_get_all is filters; confirm against
        # the driver signature.
        images = self.db_api.image_get_all(ctxt2, filters)
        self.assertEqual(1, len(images))
        self.assertEqual(UUIDY, images[0]['id'])

    def test_image_get_all_with_filter_tags(self):
        self.db_api.image_tag_create(self.context, UUID1, 'x86')
        self.db_api.image_tag_create(self.context, UUID1, '64bit')
        self.db_api.image_tag_create(self.context, UUID2, 'power')
        self.db_api.image_tag_create(self.context, UUID2, '64bit')
        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['64bit']})
        self.assertEqual(2, len(images))
        image_ids = [image['id'] for image in images]
        expected = [UUID1, UUID2]
        self.assertEqual(sorted(expected), sorted(image_ids))

    def test_image_get_all_with_filter_multi_tags(self):
        # Multiple tags combine conjunctively; only UUID2 has both.
        self.db_api.image_tag_create(self.context, UUID1, 'x86')
        self.db_api.image_tag_create(self.context, UUID1, '64bit')
        self.db_api.image_tag_create(self.context, UUID2, 'power')
        self.db_api.image_tag_create(self.context, UUID2, '64bit')
        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['64bit', 'power']
                                                    })
        self.assertEqual(1, len(images))
        self.assertEqual(UUID2, images[0]['id'])

    def test_image_get_all_with_filter_tags_and_nonexistent(self):
        # One real tag plus one unknown tag: conjunction matches nothing.
        self.db_api.image_tag_create(self.context, UUID1, 'x86')
        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['x86', 'fake']
                                                    })
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_deleted_tags(self):
        # A deleted tag must stop matching the tags filter.
        tag = self.db_api.image_tag_create(self.context, UUID1, 'AIX')
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'tags': [tag],
                                           })
        self.assertEqual(1, len(images))
        self.db_api.image_tag_delete(self.context, UUID1, tag)
        images = self.db_api.image_get_all(self.context,
                                           filters={
                                               'tags': [tag],
                                           })
        self.assertEqual(0, len(images))

    def test_image_get_all_with_filter_undefined_tags(self):
        images = self.db_api.image_get_all(self.context,
                                           filters={'tags': ['fake']})
        self.assertEqual(0, len(images))

    def test_image_paginate(self):
        """Paginate through a list of images using limit and marker"""
        now = timeutils.utcnow()
        extra_uuids = [(str(uuid.uuid4()),
                        now + datetime.timedelta(seconds=i * 5))
                       for i in range(2)]
        extra_images = [build_image_fixture(id=_id,
                                            created_at=_dt,
                                            updated_at=_dt)
                        for _id, _dt in extra_uuids]
        self.create_images(extra_images)

        # Reverse uuids to match default sort of created_at
        extra_uuids.reverse()

        page = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual([i[0] for i in extra_uuids], [i['id'] for i in page])
        last = page[-1]['id']

        page = self.db_api.image_get_all(self.context, limit=2, marker=last)
        self.assertEqual([UUID3, UUID2], [i['id'] for i in page])

        page = self.db_api.image_get_all(self.context, limit=2, marker=UUID2)
        self.assertEqual([UUID1], [i['id'] for i in page])

    def test_image_get_all_invalid_sort_key(self):
        self.assertRaises(exception.InvalidSortKey,
                          self.db_api.image_get_all,
                          self.context, sort_key=['blah'])

    def test_image_get_all_limit_marker(self):
        images = self.db_api.image_get_all(self.context, limit=2)
        self.assertEqual(2, len(images))

    def test_image_get_all_with_tag_returning(self):
        # return_tag=True attaches each image's (non-deleted) tag list.
        expected_tags = {UUID1: ['foo'], UUID2: ['bar'], UUID3: ['baz']}
        self.db_api.image_tag_create(self.context, UUID1,
                                     expected_tags[UUID1][0])
        self.db_api.image_tag_create(self.context, UUID2,
                                     expected_tags[UUID2][0])
        self.db_api.image_tag_create(self.context, UUID3,
                                     expected_tags[UUID3][0])

        images = self.db_api.image_get_all(self.context, return_tag=True)
        self.assertEqual(3, len(images))
        for image in images:
            self.assertIn('tags', image)
            self.assertEqual(expected_tags[image['id']], image['tags'])

        self.db_api.image_tag_delete(self.context, UUID1,
                                     expected_tags[UUID1][0])
        expected_tags[UUID1] = []

        images = self.db_api.image_get_all(self.context, return_tag=True)
        self.assertEqual(3, len(images))
        for image in images:
            self.assertIn('tags', image)
            self.assertEqual(expected_tags[image['id']], image['tags'])

    def test_image_destroy(self):
        # Destroying an image must also clear its locations, soft-delete its
        # properties, and remove members and tags.
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': 'b', 'metadata': {},
                          'status': 'active'}]
        fixture = {'status': 'queued', 'locations': location_data}
        image = self.db_api.image_create(self.context, fixture)
        IMG_ID = image['id']

        fixture = {'name': 'ping', 'value': 'pong', 'image_id': IMG_ID}
        prop = self.db_api.image_property_create(self.context, fixture)
        TENANT2 = str(uuid.uuid4())
        fixture = {'image_id': IMG_ID, 'member': TENANT2, 'can_share': False}
        member = self.db_api.image_member_create(self.context, fixture)
        self.db_api.image_tag_create(self.context, IMG_ID, 'snarf')

        # Sanity check: all child records exist before the destroy.
        self.assertEqual(2, len(image['locations']))
        self.assertIn('id', image['locations'][0])
        self.assertIn('id', image['locations'][1])
        image['locations'][0].pop('id')
        image['locations'][1].pop('id')
        self.assertEqual(location_data, image['locations'])
        self.assertEqual(('ping', 'pong', IMG_ID, False),
                         (prop['name'], prop['value'],
                          prop['image_id'], prop['deleted']))
        self.assertEqual((TENANT2, IMG_ID, False),
                         (member['member'], member['image_id'],
                          member['can_share']))
        self.assertEqual(['snarf'],
                         self.db_api.image_tag_get_all(self.context,
                                                       IMG_ID))

        image = self.db_api.image_destroy(self.adm_context, IMG_ID)
        self.assertTrue(image['deleted'])
        self.assertTrue(image['deleted_at'])
        self.assertRaises(exception.NotFound,
                          self.db_api.image_get, self.context, IMG_ID)

        self.assertEqual([], image['locations'])
        prop = image['properties'][0]
        self.assertEqual(('ping', IMG_ID, True),
                         (prop['name'], prop['image_id'],
                          prop['deleted']))

        self.context.auth_token = 'user:%s:user' % TENANT2
        members = self.db_api.image_member_find(self.context, IMG_ID)
        self.assertEqual([], members)
        tags = self.db_api.image_tag_get_all(self.context, IMG_ID)
        self.assertEqual([], tags)

    def test_image_destroy_with_delete_all(self):
        """Check the image child element's _image_delete_all methods.

        checks if all the image_delete_all methods deletes only the child
        elements of the image to be deleted.
        """
        TENANT2 = str(uuid.uuid4())
        location_data = [{'url': 'a', 'metadata': {'key': 'value'},
                          'status': 'active'},
                         {'url': 'b', 'metadata': {},
                          'status': 'active'}]

        def _create_image_with_child_entries():
            # Builds one image carrying a property, a member and a tag.
            fixture = {'status': 'queued', 'locations': location_data}
            image_id = self.db_api.image_create(self.context, fixture)['id']

            fixture = {'name': 'ping', 'value': 'pong', 'image_id': image_id}
            self.db_api.image_property_create(self.context, fixture)
            fixture = {'image_id': image_id, 'member': TENANT2,
                       'can_share': False}
            self.db_api.image_member_create(self.context, fixture)
            self.db_api.image_tag_create(self.context, image_id, 'snarf')
            return image_id

        ACTIVE_IMG_ID = _create_image_with_child_entries()
        DEL_IMG_ID = _create_image_with_child_entries()

        deleted_image = self.db_api.image_destroy(self.adm_context,
                                                  DEL_IMG_ID)
        self.assertTrue(deleted_image['deleted'])
        self.assertTrue(deleted_image['deleted_at'])
        self.assertRaises(exception.NotFound,
                          self.db_api.image_get, self.context, DEL_IMG_ID)

        # The sibling image and ALL of its child records must be untouched.
        active_image = self.db_api.image_get(self.context, ACTIVE_IMG_ID)
        self.assertFalse(active_image['deleted'])
        self.assertFalse(active_image['deleted_at'])

        self.assertEqual(2, len(active_image['locations']))
        self.assertIn('id', active_image['locations'][0])
        self.assertIn('id', active_image['locations'][1])
        active_image['locations'][0].pop('id')
        active_image['locations'][1].pop('id')
        self.assertEqual(location_data, active_image['locations'])
        self.assertEqual(1, len(active_image['properties']))
        prop = active_image['properties'][0]
        self.assertEqual(('ping', 'pong', ACTIVE_IMG_ID),
                         (prop['name'], prop['value'],
                          prop['image_id']))
        self.assertEqual((False, None),
                         (prop['deleted'], prop['deleted_at']))

        self.context.auth_token = 'user:%s:user' % TENANT2
        members = self.db_api.image_member_find(self.context,
                                                ACTIVE_IMG_ID)
        self.assertEqual(1, len(members))
        member = members[0]
        self.assertEqual((TENANT2, ACTIVE_IMG_ID, False),
                         (member['member'], member['image_id'],
                          member['can_share']))
        tags = self.db_api.image_tag_get_all(self.context, ACTIVE_IMG_ID)
        self.assertEqual(['snarf'], tags)

    def test_image_get_multiple_members(self):
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1,
                                       owner_is_tenant=True)
        ctxt2 = context.RequestContext(is_admin=False, user=TENANT2,
                                       auth_token='user:%s:user' % TENANT2,
                                       owner_is_tenant=False)
        UUIDX = str(uuid.uuid4())
        # We need private image and context.owner should not match image
        # owner
        self.db_api.image_create(ctxt1, {'id': UUIDX,
                                         'status': 'queued',
                                         'is_public': False,
                                         'owner': TENANT1})

        values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False}
        self.db_api.image_member_create(ctxt1, values)

        image = self.db_api.image_get(ctxt2, UUIDX)
        self.assertEqual(UUIDX, image['id'])

        # by default get_all displays only images with status 'accepted'
        images = self.db_api.image_get_all(ctxt2)
        self.assertEqual(3, len(images))

        # filter by rejected
        images = self.db_api.image_get_all(ctxt2, member_status='rejected')
        self.assertEqual(3, len(images))

        # filter by visibility
        images = self.db_api.image_get_all(ctxt2,
                                           filters={'visibility': 'shared'})
        self.assertEqual(0, len(images))

        # filter by visibility
        images = self.db_api.image_get_all(ctxt2, member_status='pending',
                                           filters={'visibility': 'shared'})
        self.assertEqual(1, len(images))

        # filter by visibility
        images = self.db_api.image_get_all(ctxt2, member_status='all',
                                           filters={'visibility': 'shared'})
        self.assertEqual(1, len(images))

        # filter by status pending
        images = self.db_api.image_get_all(ctxt2, member_status='pending')
        self.assertEqual(4, len(images))

        # filter by status all
        images = self.db_api.image_get_all(ctxt2, member_status='all')
        self.assertEqual(4, len(images))

    def test_is_image_visible(self):
        # A private image is visible to a member, and stops being visible
        # once the membership is deleted.
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1,
                                       auth_token='user:%s:user' % TENANT1,
                                       owner_is_tenant=True)
        ctxt2 = context.RequestContext(is_admin=False, user=TENANT2,
                                       auth_token='user:%s:user' % TENANT2,
                                       owner_is_tenant=False)
        UUIDX = str(uuid.uuid4())
        # We need private image and context.owner should not match image
        # owner
        image = self.db_api.image_create(ctxt1, {'id': UUIDX,
                                                 'status': 'queued',
                                                 'is_public': False,
                                                 'owner': TENANT1})

        values = {'image_id': UUIDX, 'member': TENANT2, 'can_share': False}
        self.db_api.image_member_create(ctxt1, values)

        result = self.db_api.is_image_visible(ctxt2, image)
        self.assertTrue(result)

        # image should not be visible for a deleted member
        members = self.db_api.image_member_find(ctxt1, image_id=UUIDX)
        self.db_api.image_member_delete(ctxt1, members[0]['id'])

        result = self.db_api.is_image_visible(ctxt2, image)
        self.assertFalse(result)

    def test_image_tag_create(self):
        tag = self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.assertEqual('snap', tag)

    def test_image_tag_create_bad_value(self):
        # 4-byte unicode characters are not allowed in tags.
        self.assertRaises(exception.Invalid,
                          self.db_api.image_tag_create,
                          self.context, UUID1, u'Bad \U0001f62a')

    def test_image_tag_set_all(self):
        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        self.assertEqual([], tags)

        self.db_api.image_tag_set_all(self.context, UUID1, ['ping', 'pong'])

        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        # NOTE(bcwaldon): tag ordering should match exactly what was provided
        self.assertEqual(['ping', 'pong'], tags)

    def test_image_tag_get_all(self):
        self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.db_api.image_tag_create(self.context, UUID1, 'snarf')
        self.db_api.image_tag_create(self.context, UUID2, 'snarf')

        # Check the tags for the first image
        tags = self.db_api.image_tag_get_all(self.context, UUID1)
        expected = ['snap', 'snarf']
        self.assertEqual(expected, tags)

        # Check the tags for the second image
        tags = self.db_api.image_tag_get_all(self.context, UUID2)
        expected = ['snarf']
        self.assertEqual(expected, tags)

    def test_image_tag_get_all_no_tags(self):
        actual = self.db_api.image_tag_get_all(self.context, UUID1)
        self.assertEqual([], actual)

    def test_image_tag_get_all_non_existent_image(self):
        # Unknown image ids yield an empty tag list rather than an error.
        bad_image_id = str(uuid.uuid4())
        actual = self.db_api.image_tag_get_all(self.context, bad_image_id)
        self.assertEqual([], actual)

    def test_image_tag_delete(self):
        # Deleting twice must raise NotFound the second time.
        self.db_api.image_tag_create(self.context, UUID1, 'snap')
        self.db_api.image_tag_delete(self.context, UUID1, 'snap')
        self.assertRaises(exception.NotFound, self.db_api.image_tag_delete,
                          self.context, UUID1, 'snap')

    @mock.patch.object(timeutils, 'utcnow')
    def test_image_member_create(self, mock_utcnow):
        mock_utcnow.return_value = datetime.datetime.utcnow()
        memberships = self.db_api.image_member_find(self.context)
        self.assertEqual([], memberships)

        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        self.db_api.image_member_create(self.context,
                                        {'member': TENANT1,
                                         'image_id': UUID1})

        memberships = self.db_api.image_member_find(self.context)
        self.assertEqual(1, len(memberships))
        actual = memberships[0]
        self.assertIsNotNone(actual['created_at'])
        self.assertIsNotNone(actual['updated_at'])
        # Drop the unpredictable fields before comparing the remainder.
        actual.pop('id')
        actual.pop('created_at')
        actual.pop('updated_at')
        expected = {
            'member': TENANT1,
            'image_id': UUID1,
            'can_share': False,
            'status': 'pending',
            'deleted': False,
        }
        self.assertEqual(expected, actual)

    def test_image_member_update(self):
        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        member = self.db_api.image_member_create(self.context,
                                                 {'member': TENANT1,
                                                  'image_id': UUID1})
        member_id = member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        member = self.db_api.image_member_update(self.context,
                                                 member_id,
                                                 {'can_share': True})

        self.assertNotEqual(member['created_at'], member['updated_at'])
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': True,
                    'deleted': False}
        self.assertEqual(expected, member)

        members = self.db_api.image_member_find(self.context,
                                                member=TENANT1,
                                                image_id=UUID1)
        member = members[0]
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        self.assertEqual(expected, member)

    def test_image_member_update_status(self):
        TENANT1 = str(uuid.uuid4())
        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        member = self.db_api.image_member_create(self.context,
                                                 {'member': TENANT1,
                                                  'image_id': UUID1})
        member_id = member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'pending',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        member = self.db_api.image_member_update(self.context,
                                                 member_id,
                                                 {'status': 'accepted'})

        self.assertNotEqual(member['created_at'], member['updated_at'])
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        expected = {'member': TENANT1,
                    'image_id': UUID1,
                    'status': 'accepted',
                    'can_share': False,
                    'deleted': False}
        self.assertEqual(expected, member)

        members = self.db_api.image_member_find(self.context,
                                                member=TENANT1,
                                                image_id=UUID1)
        member = members[0]
        member.pop('id')
        member.pop('created_at')
        member.pop('updated_at')
        self.assertEqual(expected, member)

    def test_image_member_find(self):
        TENANT1 = str(uuid.uuid4())
        TENANT2 = str(uuid.uuid4())
        fixtures = [
            {'member': TENANT1, 'image_id': UUID1},
            {'member': TENANT1, 'image_id': UUID2, 'status': 'rejected'},
            {'member': TENANT2, 'image_id': UUID1, 'status': 'accepted'},
        ]
        for f in fixtures:
            self.db_api.image_member_create(self.context, copy.deepcopy(f))

        # NOTE(review): _simplify is dead code — it is never called and
        # returns None; presumably a leftover from a refactor. Left intact.
        def _simplify(output):
            return

        def _assertMemberListMatch(list1, list2):
            # Compare member lists on (member, image_id) only, order-free.
            _simple = lambda x: set([(o['member'], o['image_id'])
                                     for o in x])
            self.assertEqual(_simple(list1), _simple(list2))

        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
        self.context.auth_token = 'user:%s:user' % TENANT1
        output = self.db_api.image_member_find(self.context,
                                               member=TENANT1)
        _assertMemberListMatch([fixtures[0], fixtures[1]], output)

        output = self.db_api.image_member_find(self.adm_context,
                                               image_id=UUID1)
        _assertMemberListMatch([fixtures[0], fixtures[2]], output)

        # NOTE(flaper87): Update auth token, otherwise
        # non visible members won't be returned.
self.context.auth_token = 'user:%s:user' % TENANT2 output = self.db_api.image_member_find(self.context, member=TENANT2, image_id=UUID1) _assertMemberListMatch([fixtures[2]], output) output = self.db_api.image_member_find(self.context, status='accepted') _assertMemberListMatch([fixtures[2]], output) # NOTE(flaper87): Update auth token, otherwise # non visible members won't be returned. self.context.auth_token = 'user:%s:user' % TENANT1 output = self.db_api.image_member_find(self.context, status='rejected') _assertMemberListMatch([fixtures[1]], output) output = self.db_api.image_member_find(self.context, status='pending') _assertMemberListMatch([fixtures[0]], output) output = self.db_api.image_member_find(self.context, status='pending', image_id=UUID2) _assertMemberListMatch([], output) image_id = str(uuid.uuid4()) output = self.db_api.image_member_find(self.context, member=TENANT2, image_id=image_id) _assertMemberListMatch([], output) def test_image_member_count(self): TENANT1 = str(uuid.uuid4()) self.db_api.image_member_create(self.context, {'member': TENANT1, 'image_id': UUID1}) actual = self.db_api.image_member_count(self.context, UUID1) self.assertEqual(1, actual) def test_image_member_count_invalid_image_id(self): TENANT1 = str(uuid.uuid4()) self.db_api.image_member_create(self.context, {'member': TENANT1, 'image_id': UUID1}) self.assertRaises(exception.Invalid, self.db_api.image_member_count, self.context, None) def test_image_member_count_empty_image_id(self): TENANT1 = str(uuid.uuid4()) self.db_api.image_member_create(self.context, {'member': TENANT1, 'image_id': UUID1}) self.assertRaises(exception.Invalid, self.db_api.image_member_count, self.context, "") def test_image_member_delete(self): TENANT1 = str(uuid.uuid4()) # NOTE(flaper87): Update auth token, otherwise # non visible members won't be returned. 
self.context.auth_token = 'user:%s:user' % TENANT1 fixture = {'member': TENANT1, 'image_id': UUID1, 'can_share': True} member = self.db_api.image_member_create(self.context, fixture) self.assertEqual(1, len(self.db_api.image_member_find(self.context))) member = self.db_api.image_member_delete(self.context, member['id']) self.assertEqual(0, len(self.db_api.image_member_find(self.context))) class DriverQuotaTests(test_utils.BaseTestCase): def setUp(self): super(DriverQuotaTests, self).setUp() self.owner_id1 = str(uuid.uuid4()) self.context1 = context.RequestContext( is_admin=False, user=self.owner_id1, tenant=self.owner_id1, auth_token='%s:%s:user' % (self.owner_id1, self.owner_id1)) self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) dt1 = timeutils.utcnow() dt2 = dt1 + datetime.timedelta(microseconds=5) fixtures = [ { 'id': UUID1, 'created_at': dt1, 'updated_at': dt1, 'size': 13, 'owner': self.owner_id1, }, { 'id': UUID2, 'created_at': dt1, 'updated_at': dt2, 'size': 17, 'owner': self.owner_id1, }, { 'id': UUID3, 'created_at': dt2, 'updated_at': dt2, 'size': 7, 'owner': self.owner_id1, }, ] self.owner1_fixtures = [ build_image_fixture(**fixture) for fixture in fixtures] for fixture in self.owner1_fixtures: self.db_api.image_create(self.context1, fixture) def test_storage_quota(self): total = reduce(lambda x, y: x + y, [f['size'] for f in self.owner1_fixtures]) x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) self.assertEqual(total, x) def test_storage_quota_without_image_id(self): total = reduce(lambda x, y: x + y, [f['size'] for f in self.owner1_fixtures]) total = total - self.owner1_fixtures[0]['size'] x = self.db_api.user_get_storage_usage( self.context1, self.owner_id1, image_id=self.owner1_fixtures[0]['id']) self.assertEqual(total, x) def test_storage_quota_multiple_locations(self): dt1 = timeutils.utcnow() sz = 53 new_fixture_dict = {'id': str(uuid.uuid4()), 'created_at': dt1, 'updated_at': dt1, 'size': sz, 'owner': 
self.owner_id1} new_fixture = build_image_fixture(**new_fixture_dict) new_fixture['locations'].append({'url': 'file:///some/path/file', 'metadata': {}, 'status': 'active'}) self.db_api.image_create(self.context1, new_fixture) total = reduce(lambda x, y: x + y, [f['size'] for f in self.owner1_fixtures]) + (sz * 2) x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) self.assertEqual(total, x) def test_storage_quota_deleted_image(self): # NOTE(flaper87): This needs to be tested for # soft deleted images as well. Currently there's no # good way to delete locations. dt1 = timeutils.utcnow() sz = 53 image_id = str(uuid.uuid4()) new_fixture_dict = {'id': image_id, 'created_at': dt1, 'updated_at': dt1, 'size': sz, 'owner': self.owner_id1} new_fixture = build_image_fixture(**new_fixture_dict) new_fixture['locations'].append({'url': 'file:///some/path/file', 'metadata': {}, 'status': 'active'}) self.db_api.image_create(self.context1, new_fixture) total = reduce(lambda x, y: x + y, [f['size'] for f in self.owner1_fixtures]) x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) self.assertEqual(total + (sz * 2), x) self.db_api.image_destroy(self.context1, image_id) x = self.db_api.user_get_storage_usage(self.context1, self.owner_id1) self.assertEqual(total, x) class TaskTests(test_utils.BaseTestCase): def setUp(self): super(TaskTests, self).setUp() self.owner_id = str(uuid.uuid4()) self.adm_context = context.RequestContext(is_admin=True, auth_token='user:user:admin') self.context = context.RequestContext( is_admin=False, auth_token='user:user:user', user=self.owner_id) self.db_api = db_tests.get_db(self.config) self.fixtures = self.build_task_fixtures() db_tests.reset_db(self.db_api) def build_task_fixtures(self): self.context.tenant = str(uuid.uuid4()) fixtures = [ { 'owner': self.context.owner, 'type': 'import', 'input': {'import_from': 'file:///a.img', 'import_from_format': 'qcow2', 'image_properties': { "name": "GreatStack 1.22", "tags": 
["lamp", "custom"] }}, }, { 'owner': self.context.owner, 'type': 'import', 'input': {'import_from': 'file:///b.img', 'import_from_format': 'qcow2', 'image_properties': { "name": "GreatStack 1.23", "tags": ["lamp", "good"] }}, }, { 'owner': self.context.owner, "type": "export", "input": { "export_uuid": "deadbeef-dead-dead-dead-beefbeefbeef", "export_to": "swift://cloud.foo/myaccount/mycontainer/path", "export_format": "qcow2" } }, ] return [build_task_fixture(**fixture) for fixture in fixtures] def test_task_get_all_with_filter(self): for fixture in self.fixtures: self.db_api.task_create(self.adm_context, build_task_fixture(**fixture)) import_tasks = self.db_api.task_get_all(self.adm_context, filters={'type': 'import'}) self.assertTrue(import_tasks) self.assertEqual(2, len(import_tasks)) for task in import_tasks: self.assertEqual('import', task['type']) self.assertEqual(self.context.owner, task['owner']) def test_task_get_all_as_admin(self): tasks = [] for fixture in self.fixtures: task = self.db_api.task_create(self.adm_context, build_task_fixture(**fixture)) tasks.append(task) import_tasks = self.db_api.task_get_all(self.adm_context) self.assertTrue(import_tasks) self.assertEqual(3, len(import_tasks)) def test_task_get_all_marker(self): for fixture in self.fixtures: self.db_api.task_create(self.adm_context, build_task_fixture(**fixture)) tasks = self.db_api.task_get_all(self.adm_context, sort_key='id') task_ids = [t['id'] for t in tasks] tasks = self.db_api.task_get_all(self.adm_context, sort_key='id', marker=task_ids[0]) self.assertEqual(2, len(tasks)) def test_task_get_all_limit(self): for fixture in self.fixtures: self.db_api.task_create(self.adm_context, build_task_fixture(**fixture)) tasks = self.db_api.task_get_all(self.adm_context, limit=2) self.assertEqual(2, len(tasks)) # A limit of None should not equate to zero tasks = self.db_api.task_get_all(self.adm_context, limit=None) self.assertEqual(3, len(tasks)) # A limit of zero should actually mean zero 
tasks = self.db_api.task_get_all(self.adm_context, limit=0) self.assertEqual(0, len(tasks)) def test_task_get_all_owned(self): TENANT1 = str(uuid.uuid4()) ctxt1 = context.RequestContext(is_admin=False, tenant=TENANT1, auth_token='user:%s:user' % TENANT1) task_values = {'type': 'import', 'status': 'pending', 'input': '{"loc": "fake"}', 'owner': TENANT1} self.db_api.task_create(ctxt1, task_values) TENANT2 = str(uuid.uuid4()) ctxt2 = context.RequestContext(is_admin=False, tenant=TENANT2, auth_token='user:%s:user' % TENANT2) task_values = {'type': 'export', 'status': 'pending', 'input': '{"loc": "fake"}', 'owner': TENANT2} self.db_api.task_create(ctxt2, task_values) tasks = self.db_api.task_get_all(ctxt1) task_owners = set([task['owner'] for task in tasks]) expected = set([TENANT1]) self.assertEqual(sorted(expected), sorted(task_owners)) def test_task_get(self): expires_at = timeutils.utcnow() image_id = str(uuid.uuid4()) fixture = { 'owner': self.context.owner, 'type': 'import', 'status': 'pending', 'input': '{"loc": "fake"}', 'result': "{'image_id': %s}" % image_id, 'message': 'blah', 'expires_at': expires_at } task = self.db_api.task_create(self.adm_context, fixture) self.assertIsNotNone(task) self.assertIsNotNone(task['id']) task_id = task['id'] task = self.db_api.task_get(self.adm_context, task_id) self.assertIsNotNone(task) self.assertEqual(task_id, task['id']) self.assertEqual(self.context.owner, task['owner']) self.assertEqual('import', task['type']) self.assertEqual('pending', task['status']) self.assertEqual(fixture['input'], task['input']) self.assertEqual(fixture['result'], task['result']) self.assertEqual(fixture['message'], task['message']) self.assertEqual(expires_at, task['expires_at']) def test_task_get_all(self): now = timeutils.utcnow() image_id = str(uuid.uuid4()) fixture1 = { 'owner': self.context.owner, 'type': 'import', 'status': 'pending', 'input': '{"loc": "fake_1"}', 'result': "{'image_id': %s}" % image_id, 'message': 'blah_1', 'expires_at': 
now, 'created_at': now, 'updated_at': now } fixture2 = { 'owner': self.context.owner, 'type': 'import', 'status': 'pending', 'input': '{"loc": "fake_2"}', 'result': "{'image_id': %s}" % image_id, 'message': 'blah_2', 'expires_at': now, 'created_at': now, 'updated_at': now } task1 = self.db_api.task_create(self.adm_context, fixture1) task2 = self.db_api.task_create(self.adm_context, fixture2) self.assertIsNotNone(task1) self.assertIsNotNone(task2) task1_id = task1['id'] task2_id = task2['id'] task_fixtures = {task1_id: fixture1, task2_id: fixture2} tasks = self.db_api.task_get_all(self.adm_context) self.assertEqual(2, len(tasks)) self.assertEqual(set((tasks[0]['id'], tasks[1]['id'])), set((task1_id, task2_id))) for task in tasks: fixture = task_fixtures[task['id']] self.assertEqual(self.context.owner, task['owner']) self.assertEqual(fixture['type'], task['type']) self.assertEqual(fixture['status'], task['status']) self.assertEqual(fixture['expires_at'], task['expires_at']) self.assertFalse(task['deleted']) self.assertIsNone(task['deleted_at']) self.assertEqual(fixture['created_at'], task['created_at']) self.assertEqual(fixture['updated_at'], task['updated_at']) task_details_keys = ['input', 'message', 'result'] for key in task_details_keys: self.assertNotIn(key, task) def test_task_create(self): task_id = str(uuid.uuid4()) self.context.tenant = self.context.owner values = { 'id': task_id, 'owner': self.context.owner, 'type': 'export', 'status': 'pending', } task_values = build_task_fixture(**values) task = self.db_api.task_create(self.adm_context, task_values) self.assertIsNotNone(task) self.assertEqual(task_id, task['id']) self.assertEqual(self.context.owner, task['owner']) self.assertEqual('export', task['type']) self.assertEqual('pending', task['status']) self.assertEqual({'ping': 'pong'}, task['input']) def test_task_create_with_all_task_info_null(self): task_id = str(uuid.uuid4()) self.context.tenant = str(uuid.uuid4()) values = { 'id': task_id, 'owner': 
self.context.owner, 'type': 'export', 'status': 'pending', 'input': None, 'result': None, 'message': None, } task_values = build_task_fixture(**values) task = self.db_api.task_create(self.adm_context, task_values) self.assertIsNotNone(task) self.assertEqual(task_id, task['id']) self.assertEqual(self.context.owner, task['owner']) self.assertEqual('export', task['type']) self.assertEqual('pending', task['status']) self.assertIsNone(task['input']) self.assertIsNone(task['result']) self.assertIsNone(task['message']) def test_task_update(self): self.context.tenant = str(uuid.uuid4()) result = {'foo': 'bar'} task_values = build_task_fixture(owner=self.context.owner, result=result) task = self.db_api.task_create(self.adm_context, task_values) task_id = task['id'] fixture = { 'status': 'processing', 'message': 'This is a error string', } task = self.db_api.task_update(self.adm_context, task_id, fixture) self.assertEqual(task_id, task['id']) self.assertEqual(self.context.owner, task['owner']) self.assertEqual('import', task['type']) self.assertEqual('processing', task['status']) self.assertEqual({'ping': 'pong'}, task['input']) self.assertEqual(result, task['result']) self.assertEqual('This is a error string', task['message']) self.assertFalse(task['deleted']) self.assertIsNone(task['deleted_at']) self.assertIsNone(task['expires_at']) self.assertEqual(task_values['created_at'], task['created_at']) self.assertGreater(task['updated_at'], task['created_at']) def test_task_update_with_all_task_info_null(self): self.context.tenant = str(uuid.uuid4()) task_values = build_task_fixture(owner=self.context.owner, input=None, result=None, message=None) task = self.db_api.task_create(self.adm_context, task_values) task_id = task['id'] fixture = {'status': 'processing'} task = self.db_api.task_update(self.adm_context, task_id, fixture) self.assertEqual(task_id, task['id']) self.assertEqual(self.context.owner, task['owner']) self.assertEqual('import', task['type']) 
self.assertEqual('processing', task['status']) self.assertIsNone(task['input']) self.assertIsNone(task['result']) self.assertIsNone(task['message']) self.assertFalse(task['deleted']) self.assertIsNone(task['deleted_at']) self.assertIsNone(task['expires_at']) self.assertEqual(task_values['created_at'], task['created_at']) self.assertGreater(task['updated_at'], task['created_at']) def test_task_delete(self): task_values = build_task_fixture(owner=self.context.owner) task = self.db_api.task_create(self.adm_context, task_values) self.assertIsNotNone(task) self.assertFalse(task['deleted']) self.assertIsNone(task['deleted_at']) task_id = task['id'] self.db_api.task_delete(self.adm_context, task_id) self.assertRaises(exception.TaskNotFound, self.db_api.task_get, self.context, task_id) def test_task_delete_as_admin(self): task_values = build_task_fixture(owner=self.context.owner) task = self.db_api.task_create(self.adm_context, task_values) self.assertIsNotNone(task) self.assertFalse(task['deleted']) self.assertIsNone(task['deleted_at']) task_id = task['id'] self.db_api.task_delete(self.adm_context, task_id) del_task = self.db_api.task_get(self.adm_context, task_id, force_show_deleted=True) self.assertIsNotNone(del_task) self.assertEqual(task_id, del_task['id']) self.assertTrue(del_task['deleted']) self.assertIsNotNone(del_task['deleted_at']) class DBPurgeTests(test_utils.BaseTestCase): def setUp(self): super(DBPurgeTests, self).setUp() self.adm_context = context.get_admin_context(show_deleted=True) self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self.image_fixtures, self.task_fixtures = self.build_fixtures() self.create_tasks(self.task_fixtures) self.create_images(self.image_fixtures) def build_fixtures(self): dt1 = timeutils.utcnow() - datetime.timedelta(days=5) dt2 = dt1 + datetime.timedelta(days=1) dt3 = dt2 + datetime.timedelta(days=1) fixtures = [ { 'created_at': dt1, 'updated_at': dt1, 'deleted_at': dt3, 'deleted': True, }, { 'created_at': 
dt1, 'updated_at': dt2, 'deleted_at': timeutils.utcnow(), 'deleted': True, }, { 'created_at': dt2, 'updated_at': dt2, 'deleted_at': None, 'deleted': False, }, ] return ( [build_image_fixture(**fixture) for fixture in fixtures], [build_task_fixture(**fixture) for fixture in fixtures], ) def create_images(self, images): for fixture in images: self.db_api.image_create(self.adm_context, fixture) def create_tasks(self, tasks): for fixture in tasks: self.db_api.task_create(self.adm_context, fixture) def test_db_purge(self): self.db_api.purge_deleted_rows(self.adm_context, 1, 5) images = self.db_api.image_get_all(self.adm_context) self.assertEqual(len(images), 2) tasks = self.db_api.task_get_all(self.adm_context) self.assertEqual(len(tasks), 2) class TestVisibility(test_utils.BaseTestCase): def setUp(self): super(TestVisibility, self).setUp() self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self.setup_tenants() self.setup_contexts() self.fixtures = self.build_image_fixtures() self.create_images(self.fixtures) def setup_tenants(self): self.admin_tenant = str(uuid.uuid4()) self.tenant1 = str(uuid.uuid4()) self.tenant2 = str(uuid.uuid4()) def setup_contexts(self): self.admin_context = context.RequestContext( is_admin=True, tenant=self.admin_tenant) self.admin_none_context = context.RequestContext( is_admin=True, tenant=None) self.tenant1_context = context.RequestContext(tenant=self.tenant1) self.tenant2_context = context.RequestContext(tenant=self.tenant2) self.none_context = context.RequestContext(tenant=None) def build_image_fixtures(self): fixtures = [] owners = { 'Unowned': None, 'Admin Tenant': self.admin_tenant, 'Tenant 1': self.tenant1, 'Tenant 2': self.tenant2, } visibilities = {'public': True, 'private': False} for owner_label, owner in owners.items(): for visibility, is_public in visibilities.items(): fixture = { 'name': '%s, %s' % (owner_label, visibility), 'owner': owner, 'is_public': is_public, } fixtures.append(fixture) return 
[build_image_fixture(**f) for f in fixtures] def create_images(self, images): for fixture in images: self.db_api.image_create(self.admin_context, fixture) class VisibilityTests(object): def test_unknown_admin_sees_all(self): images = self.db_api.image_get_all(self.admin_none_context) self.assertEqual(8, len(images)) def test_unknown_admin_is_public_true(self): images = self.db_api.image_get_all(self.admin_none_context, is_public=True) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_admin_is_public_false(self): images = self.db_api.image_get_all(self.admin_none_context, is_public=False) self.assertEqual(4, len(images)) for i in images: self.assertFalse(i['is_public']) def test_unknown_admin_is_public_none(self): images = self.db_api.image_get_all(self.admin_none_context) self.assertEqual(8, len(images)) def test_unknown_admin_visibility_public(self): images = self.db_api.image_get_all(self.admin_none_context, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_admin_visibility_private(self): images = self.db_api.image_get_all(self.admin_none_context, filters={'visibility': 'private'}) self.assertEqual(4, len(images)) for i in images: self.assertFalse(i['is_public']) def test_known_admin_sees_all(self): images = self.db_api.image_get_all(self.admin_context) self.assertEqual(8, len(images)) def test_known_admin_is_public_true(self): images = self.db_api.image_get_all(self.admin_context, is_public=True) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_known_admin_is_public_false(self): images = self.db_api.image_get_all(self.admin_context, is_public=False) self.assertEqual(4, len(images)) for i in images: self.assertFalse(i['is_public']) def test_known_admin_is_public_none(self): images = self.db_api.image_get_all(self.admin_context) self.assertEqual(8, len(images)) def 
test_admin_as_user_true(self): images = self.db_api.image_get_all(self.admin_context, admin_as_user=True) self.assertEqual(5, len(images)) for i in images: self.assertTrue(i['is_public'] or i['owner'] == self.admin_tenant) def test_known_admin_visibility_public(self): images = self.db_api.image_get_all(self.admin_context, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_known_admin_visibility_private(self): images = self.db_api.image_get_all(self.admin_context, filters={'visibility': 'private'}) self.assertEqual(4, len(images)) for i in images: self.assertFalse(i['is_public']) def test_what_unknown_user_sees(self): images = self.db_api.image_get_all(self.none_context) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_user_is_public_true(self): images = self.db_api.image_get_all(self.none_context, is_public=True) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_user_is_public_false(self): images = self.db_api.image_get_all(self.none_context, is_public=False) self.assertEqual(0, len(images)) def test_unknown_user_is_public_none(self): images = self.db_api.image_get_all(self.none_context) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_user_visibility_public(self): images = self.db_api.image_get_all(self.none_context, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_unknown_user_visibility_private(self): images = self.db_api.image_get_all(self.none_context, filters={'visibility': 'private'}) self.assertEqual(0, len(images)) def test_what_tenant1_sees(self): images = self.db_api.image_get_all(self.tenant1_context) self.assertEqual(5, len(images)) for i in images: if not i['is_public']: self.assertEqual(i['owner'], self.tenant1) def test_tenant1_is_public_true(self): images 
= self.db_api.image_get_all(self.tenant1_context, is_public=True) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_tenant1_is_public_false(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False) self.assertEqual(1, len(images)) self.assertFalse(images[0]['is_public']) self.assertEqual(images[0]['owner'], self.tenant1) def test_tenant1_is_public_none(self): images = self.db_api.image_get_all(self.tenant1_context) self.assertEqual(5, len(images)) for i in images: if not i['is_public']: self.assertEqual(self.tenant1, i['owner']) def test_tenant1_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) for i in images: self.assertTrue(i['is_public']) def test_tenant1_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, filters={'visibility': 'private'}) self.assertEqual(1, len(images)) self.assertFalse(images[0]['is_public']) self.assertEqual(self.tenant1, images[0]['owner']) def _setup_is_public_red_herring(self): values = { 'name': 'Red Herring', 'owner': self.tenant1, 'is_public': False, 'properties': {'is_public': 'silly'} } fixture = build_image_fixture(**values) self.db_api.image_create(self.admin_context, fixture) def test_is_public_is_a_normal_filter_for_admin(self): self._setup_is_public_red_herring() images = self.db_api.image_get_all(self.admin_context, filters={'is_public': 'silly'}) self.assertEqual(1, len(images)) self.assertEqual('Red Herring', images[0]['name']) def test_is_public_is_a_normal_filter_for_user(self): self._setup_is_public_red_herring() images = self.db_api.image_get_all(self.tenant1_context, filters={'is_public': 'silly'}) self.assertEqual(1, len(images)) self.assertEqual('Red Herring', images[0]['name']) # NOTE(markwash): the following tests are sanity checks to make sure # visibility filtering and is_public=(True|False) do not interact in # unexpected 
ways. However, using both of the filtering techniques # simultaneously is not an anticipated use case. def test_admin_is_public_true_and_visibility_public(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) def test_admin_is_public_false_and_visibility_public(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'public'}) self.assertEqual(0, len(images)) def test_admin_is_public_true_and_visibility_private(self): images = self.db_api.image_get_all(self.admin_context, is_public=True, filters={'visibility': 'private'}) self.assertEqual(0, len(images)) def test_admin_is_public_false_and_visibility_private(self): images = self.db_api.image_get_all(self.admin_context, is_public=False, filters={'visibility': 'private'}) self.assertEqual(4, len(images)) def test_tenant1_is_public_true_and_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'public'}) self.assertEqual(4, len(images)) def test_tenant1_is_public_false_and_visibility_public(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'public'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_true_and_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=True, filters={'visibility': 'private'}) self.assertEqual(0, len(images)) def test_tenant1_is_public_false_and_visibility_private(self): images = self.db_api.image_get_all(self.tenant1_context, is_public=False, filters={'visibility': 'private'}) self.assertEqual(1, len(images)) class TestMembershipVisibility(test_utils.BaseTestCase): def setUp(self): super(TestMembershipVisibility, self).setUp() self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) self._create_contexts() self._create_images() def _create_contexts(self): self.owner1, 
self.owner1_ctx = self._user_fixture() self.owner2, self.owner2_ctx = self._user_fixture() self.tenant1, self.user1_ctx = self._user_fixture() self.tenant2, self.user2_ctx = self._user_fixture() self.tenant3, self.user3_ctx = self._user_fixture() self.admin_tenant, self.admin_ctx = self._user_fixture(admin=True) def _user_fixture(self, admin=False): tenant_id = str(uuid.uuid4()) ctx = context.RequestContext(tenant=tenant_id, is_admin=admin) return tenant_id, ctx def _create_images(self): self.image_ids = {} for owner in [self.owner1, self.owner2]: self._create_image('not_shared', owner) self._create_image('shared-with-1', owner, members=[self.tenant1]) self._create_image('shared-with-2', owner, members=[self.tenant2]) self._create_image('shared-with-both', owner, members=[self.tenant1, self.tenant2]) def _create_image(self, name, owner, members=None): image = build_image_fixture(name=name, owner=owner, is_public=False) self.image_ids[(owner, name)] = image['id'] self.db_api.image_create(self.admin_ctx, image) for member in members or []: member = {'image_id': image['id'], 'member': member} self.db_api.image_member_create(self.admin_ctx, member) class MembershipVisibilityTests(object): def _check_by_member(self, ctx, member_id, expected): members = self.db_api.image_member_find(ctx, member=member_id) images = [self.db_api.image_get(self.admin_ctx, member['image_id']) for member in members] facets = [(image['owner'], image['name']) for image in images] self.assertEqual(set(expected), set(facets)) def test_owner1_finding_user1_memberships(self): """Owner1 should see images it owns that are shared with User1.""" expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), ] self._check_by_member(self.owner1_ctx, self.tenant1, expected) def test_user1_finding_user1_memberships(self): """User1 should see all images shared with User1 """ expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), (self.owner2, 'shared-with-1'), 
(self.owner2, 'shared-with-both'), ] self._check_by_member(self.user1_ctx, self.tenant1, expected) def test_user2_finding_user1_memberships(self): """User2 should see no images shared with User1 """ expected = [] self._check_by_member(self.user2_ctx, self.tenant1, expected) def test_admin_finding_user1_memberships(self): """Admin should see all images shared with User1 """ expected = [ (self.owner1, 'shared-with-1'), (self.owner1, 'shared-with-both'), (self.owner2, 'shared-with-1'), (self.owner2, 'shared-with-both'), ] self._check_by_member(self.admin_ctx, self.tenant1, expected) def _check_by_image(self, context, image_id, expected): members = self.db_api.image_member_find(context, image_id=image_id) member_ids = [member['member'] for member in members] self.assertEqual(set(expected), set(member_ids)) def test_owner1_finding_owner1s_image_members(self): """Owner1 should see all memberships of its image """ expected = [self.tenant1, self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.owner1_ctx, image_id, expected) def test_admin_finding_owner1s_image_members(self): """Admin should see all memberships of owner1's image """ expected = [self.tenant1, self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.admin_ctx, image_id, expected) def test_user1_finding_owner1s_image_members(self): """User1 should see its own membership of owner1's image """ expected = [self.tenant1] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user1_ctx, image_id, expected) def test_user2_finding_owner1s_image_members(self): """User2 should see its own membership of owner1's image """ expected = [self.tenant2] image_id = self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user2_ctx, image_id, expected) def test_user3_finding_owner1s_image_members(self): """User3 should see no memberships of owner1's image """ expected = [] image_id = 
self.image_ids[(self.owner1, 'shared-with-both')] self._check_by_image(self.user3_ctx, image_id, expected) glance-12.0.0/glance/tests/functional/db/base_metadef.py0000664000567000056710000007220312701407047024265 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from glance.common import config from glance.common import exception from glance import context import glance.tests.functional.db as db_tests from glance.tests import utils as test_utils def build_namespace_fixture(**kwargs): namespace = { 'namespace': u'MyTestNamespace', 'display_name': u'test-display-name', 'description': u'test-description', 'visibility': u'public', 'protected': 0, 'owner': u'test-owner' } namespace.update(kwargs) return namespace def build_resource_type_fixture(**kwargs): resource_type = { 'name': u'MyTestResourceType', 'protected': 0 } resource_type.update(kwargs) return resource_type def build_association_fixture(**kwargs): association = { 'name': u'MyTestResourceType', 'properties_target': 'test-properties-target', 'prefix': 'test-prefix' } association.update(kwargs) return association def build_object_fixture(**kwargs): # Full testing of required and schema done via rest api tests object = { 'namespace_id': 1, 'name': u'test-object-name', 'description': u'test-object-description', 'required': u'fake-required-properties-list', 'json_schema': u'{fake-schema}' } object.update(kwargs) return 
object def build_property_fixture(**kwargs): # Full testing of required and schema done via rest api tests property = { 'namespace_id': 1, 'name': u'test-property-name', 'json_schema': u'{fake-schema}' } property.update(kwargs) return property def build_tag_fixture(**kwargs): # Full testing of required and schema done via rest api tests tag = { 'namespace_id': 1, 'name': u'test-tag-name', } tag.update(kwargs) return tag def build_tags_fixture(tag_name_list): tag_list = [] for tag_name in tag_name_list: tag_list.append({'name': tag_name}) return tag_list class TestMetadefDriver(test_utils.BaseTestCase): """Test Driver class for Metadef tests.""" def setUp(self): """Run before each test method to initialize test environment.""" super(TestMetadefDriver, self).setUp() config.parse_args(args=[]) context_cls = context.RequestContext self.adm_context = context_cls(is_admin=True, auth_token='user:user:admin') self.context = context_cls(is_admin=False, auth_token='user:user:user') self.db_api = db_tests.get_db(self.config) db_tests.reset_db(self.db_api) def _assert_saved_fields(self, expected, actual): for k in expected.keys(): self.assertEqual(expected[k], actual[k]) class MetadefNamespaceTests(object): def test_namespace_create(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) def test_namespace_create_duplicate(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) self.assertRaises(exception.Duplicate, self.db_api.metadef_namespace_create, self.context, fixture) def test_namespace_get(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created) self._assert_saved_fields(fixture, created) found = 
self.db_api.metadef_namespace_get( self.context, created['namespace']) self.assertIsNotNone(found, "Namespace not found.") def test_namespace_get_all_with_resource_types_filter(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") rt_filters = {'resource_types': fixture['name']} found = self.db_api.metadef_namespace_get_all( self.context, filters=rt_filters, sort_key='created_at') self.assertEqual(1, len(found)) for item in found: self._assert_saved_fields(ns_fixture, item) def test_namespace_update(self): delta = {'owner': u'New Owner'} fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created['namespace']) self.assertEqual(fixture['namespace'], created['namespace']) delta_dict = copy.deepcopy(created) delta_dict.update(delta.copy()) updated = self.db_api.metadef_namespace_update( self.context, created['id'], delta_dict) self.assertEqual(delta['owner'], updated['owner']) def test_namespace_delete(self): fixture = build_namespace_fixture() created = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created, "Could not create a Namespace.") self.db_api.metadef_namespace_delete( self.context, created['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_namespace_get, self.context, created['namespace']) def test_namespace_delete_with_content(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self._assert_saved_fields(fixture_ns, created_ns) # Create object content for the 
namespace fixture_obj = build_object_fixture() created_obj = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_obj) self.assertIsNotNone(created_obj) # Create property content for the namespace fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) self.assertIsNotNone(created_prop) # Create associations fixture_assn = build_association_fixture() created_assn = self.db_api.metadef_resource_type_association_create( self.context, created_ns['namespace'], fixture_assn) self.assertIsNotNone(created_assn) deleted_ns = self.db_api.metadef_namespace_delete( self.context, created_ns['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_namespace_get, self.context, deleted_ns['namespace']) class MetadefPropertyTests(object): def test_property_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) self._assert_saved_fields(fixture_prop, created_prop) def test_property_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) self._assert_saved_fields(fixture_prop, created_prop) self.assertRaises(exception.Duplicate, self.db_api.metadef_property_create, self.context, created_ns['namespace'], fixture_prop) def test_property_get(self): fixture_ns = 
build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_prop = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], fixture_prop) found_prop = self.db_api.metadef_property_get( self.context, created_ns['namespace'], created_prop['name']) self._assert_saved_fields(fixture_prop, found_prop) def test_property_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = build_property_fixture(namespace_id=ns_created['id']) created_p1 = self.db_api.metadef_property_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_p1, "Could not create a property.") fixture2 = build_property_fixture(namespace_id=ns_created['id'], name='test-prop-2') created_p2 = self.db_api.metadef_property_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_p2, "Could not create a property.") found = self.db_api.metadef_property_get_all( self.context, ns_created['namespace']) self.assertEqual(2, len(found)) def test_property_update(self): delta = {'name': u'New-name', 'json_schema': u'new-schema'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") delta_dict = copy.deepcopy(created_prop) delta_dict.update(delta.copy()) updated = self.db_api.metadef_property_update( 
self.context, created_ns['namespace'], created_prop['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) self.assertEqual(delta['json_schema'], updated['json_schema']) def test_property_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") self.db_api.metadef_property_delete( self.context, created_ns['namespace'], created_prop['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_property_get, self.context, created_ns['namespace'], created_prop['name']) def test_property_delete_namespace_content(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) prop_fixture = build_property_fixture(namespace_id=created_ns['id']) created_prop = self.db_api.metadef_property_create( self.context, created_ns['namespace'], prop_fixture) self.assertIsNotNone(created_prop, "Could not create a property.") self.db_api.metadef_property_delete_namespace_content( self.context, created_ns['namespace']) self.assertRaises(exception.NotFound, self.db_api.metadef_property_get, self.context, created_ns['namespace'], created_prop['name']) class MetadefObjectTests(object): def test_object_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) self._assert_saved_fields(fixture_object, 
created_object) def test_object_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) self._assert_saved_fields(fixture_object, created_object) self.assertRaises(exception.Duplicate, self.db_api.metadef_object_create, self.context, created_ns['namespace'], fixture_object) def test_object_get(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_object = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], fixture_object) found_object = self.db_api.metadef_object_get( self.context, created_ns['namespace'], created_object['name']) self._assert_saved_fields(fixture_object, found_object) def test_object_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create(self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = build_object_fixture(namespace_id=ns_created['id']) created_o1 = self.db_api.metadef_object_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_o1, "Could not create an object.") fixture2 = build_object_fixture(namespace_id=ns_created['id'], name='test-object-2') created_o2 = self.db_api.metadef_object_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_o2, "Could not create an object.") found = self.db_api.metadef_object_get_all( self.context, ns_created['namespace']) 
self.assertEqual(2, len(found)) def test_object_update(self): delta = {'name': u'New-name', 'json_schema': u'new-schema', 'required': u'new-required'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) object_fixture = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], object_fixture) self.assertIsNotNone(created_object, "Could not create an object.") delta_dict = {} delta_dict.update(delta.copy()) updated = self.db_api.metadef_object_update( self.context, created_ns['namespace'], created_object['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) self.assertEqual(delta['json_schema'], updated['json_schema']) def test_object_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) object_fixture = build_object_fixture(namespace_id=created_ns['id']) created_object = self.db_api.metadef_object_create( self.context, created_ns['namespace'], object_fixture) self.assertIsNotNone(created_object, "Could not create an object.") self.db_api.metadef_object_delete( self.context, created_ns['namespace'], created_object['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_object_get, self.context, created_ns['namespace'], created_object['name']) class MetadefResourceTypeTests(object): def test_resource_type_get_all(self): resource_types_orig = self.db_api.metadef_resource_type_get_all( self.context) fixture = build_resource_type_fixture() self.db_api.metadef_resource_type_create(self.context, fixture) resource_types = self.db_api.metadef_resource_type_get_all( self.context) test_len = len(resource_types_orig) + 1 self.assertEqual(test_len, len(resource_types)) class MetadefResourceTypeAssociationTests(object): def 
test_association_create(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created) self._assert_saved_fields(ns_fixture, ns_created) assn_fixture = build_association_fixture() assn_created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], assn_fixture) self.assertIsNotNone(assn_created) self._assert_saved_fields(assn_fixture, assn_created) def test_association_create_duplicate(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created) self._assert_saved_fields(ns_fixture, ns_created) assn_fixture = build_association_fixture() assn_created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], assn_fixture) self.assertIsNotNone(assn_created) self._assert_saved_fields(assn_fixture, assn_created) self.assertRaises(exception.Duplicate, self.db_api. 
metadef_resource_type_association_create, self.context, ns_created['namespace'], assn_fixture) def test_association_delete(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") created_resource = self.db_api.metadef_resource_type_get( self.context, fixture['name']) self.assertIsNotNone(created_resource, "resource_type not created") self.db_api.metadef_resource_type_association_delete( self.context, ns_created['namespace'], created_resource['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_resource_type_association_get, self.context, ns_created['namespace'], created_resource['name']) def test_association_get_all_by_namespace(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create( self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture = build_association_fixture() created = self.db_api.metadef_resource_type_association_create( self.context, ns_created['namespace'], fixture) self.assertIsNotNone(created, "Could not create an association.") found = ( self.db_api.metadef_resource_type_association_get_all_by_namespace( self.context, ns_created['namespace'])) self.assertEqual(1, len(found)) for item in found: self._assert_saved_fields(fixture, item) class MetadefTagTests(object): def test_tag_create(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_tag = 
build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) self._assert_saved_fields(fixture_tag, created_tag) def test_tag_create_duplicate(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) self._assert_saved_fields(fixture_tag, created_tag) self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create, self.context, created_ns['namespace'], fixture_tag) def test_tag_create_tags(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) created_tags = self.db_api.metadef_tag_create_tags( self.context, created_ns['namespace'], tags) actual = set([tag['name'] for tag in created_tags]) expected = set(['Tag1', 'Tag2', 'Tag3']) self.assertEqual(expected, actual) def test_tag_create_duplicate_tags_1(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3', 'Tag2']) self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create_tags, self.context, created_ns['namespace'], tags) def test_tag_create_duplicate_tags_2(self): fixture = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture, created_ns) tags = build_tags_fixture(['Tag1', 'Tag2', 'Tag3']) 
self.db_api.metadef_tag_create_tags(self.context, created_ns['namespace'], tags) dup_tag = build_tag_fixture(namespace_id=created_ns['id'], name='Tag3') self.assertRaises(exception.Duplicate, self.db_api.metadef_tag_create, self.context, created_ns['namespace'], dup_tag) def test_tag_get(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns) self._assert_saved_fields(fixture_ns, created_ns) fixture_tag = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], fixture_tag) found_tag = self.db_api.metadef_tag_get( self.context, created_ns['namespace'], created_tag['name']) self._assert_saved_fields(fixture_tag, found_tag) def test_tag_get_all(self): ns_fixture = build_namespace_fixture() ns_created = self.db_api.metadef_namespace_create(self.context, ns_fixture) self.assertIsNotNone(ns_created, "Could not create a namespace.") self._assert_saved_fields(ns_fixture, ns_created) fixture1 = build_tag_fixture(namespace_id=ns_created['id']) created_tag1 = self.db_api.metadef_tag_create( self.context, ns_created['namespace'], fixture1) self.assertIsNotNone(created_tag1, "Could not create tag 1.") fixture2 = build_tag_fixture(namespace_id=ns_created['id'], name='test-tag-2') created_tag2 = self.db_api.metadef_tag_create( self.context, ns_created['namespace'], fixture2) self.assertIsNotNone(created_tag2, "Could not create tag 2.") found = self.db_api.metadef_tag_get_all( self.context, ns_created['namespace'], sort_key='created_at') self.assertEqual(2, len(found)) def test_tag_update(self): delta = {'name': u'New-name'} fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create(self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, 
created_ns['namespace'], tag_fixture) self.assertIsNotNone(created_tag, "Could not create a tag.") delta_dict = {} delta_dict.update(delta.copy()) updated = self.db_api.metadef_tag_update( self.context, created_ns['namespace'], created_tag['id'], delta_dict) self.assertEqual(delta['name'], updated['name']) def test_tag_delete(self): fixture_ns = build_namespace_fixture() created_ns = self.db_api.metadef_namespace_create( self.context, fixture_ns) self.assertIsNotNone(created_ns['namespace']) tag_fixture = build_tag_fixture(namespace_id=created_ns['id']) created_tag = self.db_api.metadef_tag_create( self.context, created_ns['namespace'], tag_fixture) self.assertIsNotNone(created_tag, "Could not create a tag.") self.db_api.metadef_tag_delete( self.context, created_ns['namespace'], created_tag['name']) self.assertRaises(exception.NotFound, self.db_api.metadef_tag_get, self.context, created_ns['namespace'], created_tag['name']) class MetadefDriverTests(MetadefNamespaceTests, MetadefResourceTypeTests, MetadefResourceTypeAssociationTests, MetadefPropertyTests, MetadefObjectTests, MetadefTagTests): # collection class pass glance-12.0.0/glance/tests/functional/db/test_simple.py0000664000567000056710000000463512701407047024222 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.api import CONF import glance.db.simple.api import glance.tests.functional.db as db_tests from glance.tests.functional.db import base def get_db(config): CONF.set_override('data_api', 'glance.db.simple.api', enforce_type=True) db_api = glance.db.get_api() return db_api def reset_db(db_api): db_api.reset() class TestSimpleDriver(base.TestDriver, base.DriverTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSimpleDriver, self).setUp() self.addCleanup(db_tests.reset) class TestSimpleQuota(base.DriverQuotaTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSimpleQuota, self).setUp() self.addCleanup(db_tests.reset) class TestSimpleVisibility(base.TestVisibility, base.VisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSimpleVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSimpleMembershipVisibility(base.TestMembershipVisibility, base.MembershipVisibilityTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSimpleMembershipVisibility, self).setUp() self.addCleanup(db_tests.reset) class TestSimpleTask(base.TaskTests, base.FunctionalInitWrapper): def setUp(self): db_tests.load(get_db, reset_db) super(TestSimpleTask, self).setUp() self.addCleanup(db_tests.reset) glance-12.0.0/glance/tests/functional/db/test_rpc_endpoint.py0000664000567000056710000000376012701407047025413 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import requests from glance.tests import functional class TestRegistryURLVisibility(functional.FunctionalTest): def setUp(self): super(TestRegistryURLVisibility, self).setUp() self.cleanup() self.registry_server.deployment_flavor = '' self.req_body = jsonutils.dumps([{"command": "image_get_all"}]) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.registry_port, path) def _headers(self, custom_headers=None): base_headers = { } base_headers.update(custom_headers or {}) return base_headers def test_v2_not_enabled(self): self.registry_server.enable_v2_registry = False self.start_servers(**self.__dict__.copy()) path = self._url('/rpc') response = requests.post(path, headers=self._headers(), data=self.req_body) self.assertEqual(404, response.status_code) self.stop_servers() def test_v2_enabled(self): self.registry_server.enable_v2_registry = True self.start_servers(**self.__dict__.copy()) path = self._url('/rpc') response = requests.post(path, headers=self._headers(), data=self.req_body) self.assertEqual(200, response.status_code) self.stop_servers() glance-12.0.0/glance/tests/functional/test_healthcheck_middleware.py0000664000567000056710000000331212701407047026773 0ustar jenkinsjenkins00000000000000# Copyright 2015 Hewlett Packard # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests healthcheck middleware.""" import tempfile import httplib2 from glance.tests import functional from glance.tests import utils class HealthcheckMiddlewareTest(functional.FunctionalTest): def request(self): url = 'http://127.0.0.1:%s/healthcheck' % self.api_port http = httplib2.Http() return http.request(url, 'GET') @utils.skip_if_disabled def test_healthcheck_enabled(self): self.cleanup() self.start_servers(**self.__dict__.copy()) response, content = self.request() self.assertEqual('OK', content) self.assertEqual(200, response.status) self.stop_servers() def test_healthcheck_disabled(self): with tempfile.NamedTemporaryFile() as test_disable_file: self.cleanup() self.api_server.disable_path = test_disable_file.name self.start_servers(**self.__dict__.copy()) response, content = self.request() self.assertEqual('DISABLED BY FILE', content) self.assertEqual(503, response.status) self.stop_servers() glance-12.0.0/glance/tests/functional/v2/0000775000567000056710000000000012701407204021245 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/v2/test_tasks.py0000664000567000056710000001175112701407047024015 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) TENANT3 = str(uuid.uuid4()) TENANT4 = str(uuid.uuid4()) class TestTasks(functional.FunctionalTest): def setUp(self): super(TestTasks, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_task_not_allowed_non_admin(self): self.start_servers(**self.__dict__.copy()) roles = {'X-Roles': 'member'} # Task list should be empty path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers(roles)) self.assertEqual(403, response.status_code) def test_task_lifecycle(self): self.start_servers(**self.__dict__.copy()) # Task list should be empty path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(0, len(tasks)) # Create a task path = self._url('/v2/tasks') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({ "type": "import", "input": { "import_from": "http://example.com", "import_from_format": "qcow2", 
"image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' } } }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned task entity should have a generated id and status task = jsonutils.loads(response.text) task_id = task['id'] self.assertIn('Location', response.headers) self.assertEqual(path + '/' + task_id, response.headers['Location']) checked_keys = set([u'created_at', u'id', u'input', u'message', u'owner', u'schema', u'self', u'status', u'type', u'result', u'updated_at']) self.assertEqual(checked_keys, set(task.keys())) expected_task = { 'status': 'pending', 'type': 'import', 'input': { "import_from": "http://example.com", "import_from_format": "qcow2", "image_properties": { 'disk_format': 'vhd', 'container_format': 'ovf' }}, 'schema': '/v2/schemas/task', } for key, value in expected_task.items(): self.assertEqual(value, task[key], key) # Tasks list should now have one entry path = self._url('/v2/tasks') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tasks = jsonutils.loads(response.text)['tasks'] self.assertEqual(1, len(tasks)) self.assertEqual(task_id, tasks[0]['id']) # Attempt to delete a task path = self._url('/v2/tasks/%s' % tasks[0]['id']) response = requests.delete(path, headers=self._headers()) self.assertEqual(405, response.status_code) self.assertIsNotNone(response.headers.get('Allow')) self.assertEqual('GET', response.headers.get('Allow')) self.stop_servers() class TestTasksWithRegistry(TestTasks): def setUp(self): super(TestTasksWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 'trusted-auth' glance-12.0.0/glance/tests/functional/v2/test_metadef_namespaces.py0000664000567000056710000002340212701407047026470 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) class TestNamespaces(functional.FunctionalTest): def setUp(self): super(TestNamespaces, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_namespace_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) namespace_loc_header = response.headers['Location'] # Returned namespace should match 
the created namespace with default # values of visibility=private, protected=False and owner=Context # Tenant namespace = jsonutils.loads(response.text) checked_keys = set([ u'namespace', u'display_name', u'description', u'visibility', u'self', u'schema', u'protected', u'owner', u'created_at', u'updated_at' ]) self.assertEqual(set(namespace.keys()), checked_keys) expected_namespace = { "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "private", "protected": False, "owner": TENANT1, "self": "/v2/metadefs/namespaces/%s" % namespace_name, "schema": "/v2/schemas/metadefs/namespace" } for key, value in expected_namespace.items(): self.assertEqual(namespace[key], value, key) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(409, response.status_code) # Get the namespace using the returned Location header response = requests.get(namespace_loc_header, headers=self._headers()) self.assertEqual(200, response.status_code) namespace = jsonutils.loads(response.text) self.assertEqual(namespace_name, namespace['namespace']) self.assertNotIn('object', namespace) self.assertEqual(TENANT1, namespace['owner']) self.assertEqual('private', namespace['visibility']) self.assertFalse(namespace['protected']) # The namespace should be mutable path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) media_type = 'application/json' headers = self._headers({'content-type': media_type}) namespace_name = "MyNamespace-UPDATED" data = jsonutils.dumps( { "namespace": namespace_name, "display_name": "display_name-UPDATED", "description": "description-UPDATED", "visibility": "private", # Not changed "protected": True, "owner": TENANT2 } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned namespace should reflect the changes namespace = jsonutils.loads(response.text) 
self.assertEqual('MyNamespace-UPDATED', namespace_name) self.assertEqual('display_name-UPDATED', namespace['display_name']) self.assertEqual('description-UPDATED', namespace['description']) self.assertEqual('private', namespace['visibility']) self.assertTrue(namespace['protected']) self.assertEqual(TENANT2, namespace['owner']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) namespace = jsonutils.loads(response.text) self.assertEqual('MyNamespace-UPDATED', namespace['namespace']) self.assertEqual('display_name-UPDATED', namespace['display_name']) self.assertEqual('description-UPDATED', namespace['description']) self.assertEqual('private', namespace['visibility']) self.assertTrue(namespace['protected']) self.assertEqual(TENANT2, namespace['owner']) # Deletion should not work on protected namespaces path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.delete(path, headers=self._headers()) self.assertEqual(403, response.status_code) # Unprotect namespace for deletion path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) media_type = 'application/json' headers = self._headers({'content-type': media_type}) doc = { "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": TENANT2 } data = jsonutils.dumps(doc) response = requests.put(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Deletion should work. 
Deleting namespace MyNamespace path = self._url('/v2/metadefs/namespaces/%s' % namespace_name) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) def test_metadef_dont_accept_illegal_bodies(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/bodytest') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'bodytest' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Test all the urls that supply data data_urls = [ '/v2/schemas/metadefs/namespace', '/v2/schemas/metadefs/namespaces', '/v2/schemas/metadefs/resource_type', '/v2/schemas/metadefs/resource_types', '/v2/schemas/metadefs/property', '/v2/schemas/metadefs/properties', '/v2/schemas/metadefs/object', '/v2/schemas/metadefs/objects', '/v2/schemas/metadefs/tag', '/v2/schemas/metadefs/tags', '/v2/metadefs/resource_types', ] for value in data_urls: path = self._url(value) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) # Put the namespace into the url test_urls = [ ('/v2/metadefs/namespaces/%s/resource_types', 'get'), ('/v2/metadefs/namespaces/%s/resource_types/type', 'delete'), ('/v2/metadefs/namespaces/%s', 'get'), ('/v2/metadefs/namespaces/%s', 'delete'), ('/v2/metadefs/namespaces/%s/objects/name', 'get'), ('/v2/metadefs/namespaces/%s/objects/name', 'delete'), 
('/v2/metadefs/namespaces/%s/properties', 'get'), ('/v2/metadefs/namespaces/%s/tags/test', 'get'), ('/v2/metadefs/namespaces/%s/tags/test', 'post'), ('/v2/metadefs/namespaces/%s/tags/test', 'delete'), ] for link, method in test_urls: path = self._url(link % namespace_name) data = jsonutils.dumps(["body"]) response = getattr(requests, method)( path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) glance-12.0.0/glance/tests/functional/v2/__init__.py0000664000567000056710000000000012701407047023351 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/v2/test_metadef_objects.py0000664000567000056710000002614712701407047026013 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) class TestMetadefObjects(functional.FunctionalTest): def setUp(self): super(TestMetadefObjects, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_metadata_objects_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner" } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Metadata objects should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace/objects/object1') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a object path = self._url('/v2/metadefs/namespaces/MyNamespace/objects') headers = self._headers({'content-type': 'application/json'}) metadata_object_name = "object1" data = jsonutils.dumps( { "name": metadata_object_name, "description": "object1 description.", "required": [ "property1" ], "properties": { "property1": { "type": "integer", "title": 
"property1", "description": "property1 description", "operators": [""], "default": 100, "minimum": 100, "maximum": 30000369 }, "property2": { "type": "string", "title": "property2", "description": "property2 description ", "default": "value2", "minLength": 2, "maxLength": 50 } } } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(409, response.status_code) # Get the metadata object created above path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) metadata_object = jsonutils.loads(response.text) self.assertEqual("object1", metadata_object['name']) # Returned object should match the created object metadata_object = jsonutils.loads(response.text) checked_keys = set([ u'name', u'description', u'properties', u'required', u'self', u'schema', u'created_at', u'updated_at' ]) self.assertEqual(set(metadata_object.keys()), checked_keys) expected_metadata_object = { "name": metadata_object_name, "description": "object1 description.", "required": [ "property1" ], "properties": { 'property1': { 'type': 'integer', "title": "property1", 'description': 'property1 description', 'operators': [''], 'default': 100, 'minimum': 100, 'maximum': 30000369 }, "property2": { "type": "string", "title": "property2", "description": "property2 description ", "default": "value2", "minLength": 2, "maxLength": 50 } }, "self": "/v2/metadefs/namespaces/%(" "namespace)s/objects/%(object)s" % {'namespace': namespace_name, 'object': metadata_object_name}, "schema": "v2/schemas/metadefs/object" } # Simple key values checked_values = set([ u'name', u'description', ]) for key, value in expected_metadata_object.items(): if(key in checked_values): self.assertEqual(metadata_object[key], value, key) # 
Complex key values - properties for key, value in ( expected_metadata_object["properties"]['property2'].items()): self.assertEqual( metadata_object["properties"]["property2"][key], value, key ) # The metadata_object should be mutable path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) metadata_object_name = "object1-UPDATED" data = jsonutils.dumps( { "name": metadata_object_name, "description": "desc-UPDATED", "required": [ "property2" ], "properties": { 'property1': { 'type': 'integer', "title": "property1", 'description': 'p1 desc-UPDATED', 'default': 500, 'minimum': 500, 'maximum': 1369 }, "property2": { "type": "string", "title": "property2", "description": "p2 desc-UPDATED", 'operators': [''], "default": "value2-UPDATED", "minLength": 5, "maxLength": 150 } } } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned metadata_object should reflect the changes metadata_object = jsonutils.loads(response.text) self.assertEqual('object1-UPDATED', metadata_object['name']) self.assertEqual('desc-UPDATED', metadata_object['description']) self.assertEqual('property2', metadata_object['required'][0]) updated_property1 = metadata_object['properties']['property1'] updated_property2 = metadata_object['properties']['property2'] self.assertEqual('integer', updated_property1['type']) self.assertEqual('p1 desc-UPDATED', updated_property1['description']) self.assertEqual('500', updated_property1['default']) self.assertEqual(500, updated_property1['minimum']) self.assertEqual(1369, updated_property1['maximum']) self.assertEqual([''], updated_property2['operators']) self.assertEqual('string', updated_property2['type']) self.assertEqual('p2 desc-UPDATED', updated_property2['description']) self.assertEqual('value2-UPDATED', updated_property2['default']) self.assertEqual(5, 
updated_property2['minLength']) self.assertEqual(150, updated_property2['maxLength']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.assertEqual('object1-UPDATED', metadata_object['name']) self.assertEqual('desc-UPDATED', metadata_object['description']) self.assertEqual('property2', metadata_object['required'][0]) updated_property1 = metadata_object['properties']['property1'] updated_property2 = metadata_object['properties']['property2'] self.assertEqual('integer', updated_property1['type']) self.assertEqual('p1 desc-UPDATED', updated_property1['description']) self.assertEqual('500', updated_property1['default']) self.assertEqual(500, updated_property1['minimum']) self.assertEqual(1369, updated_property1['maximum']) self.assertEqual([''], updated_property2['operators']) self.assertEqual('string', updated_property2['type']) self.assertEqual('p2 desc-UPDATED', updated_property2['description']) self.assertEqual('value2-UPDATED', updated_property2['default']) self.assertEqual(5, updated_property2['minLength']) self.assertEqual(150, updated_property2['maxLength']) # Deletion of metadata_object object1 path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # metadata_object object1 should not exist path = self._url('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadata_object_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) glance-12.0.0/glance/tests/functional/v2/test_metadef_properties.py0000664000567000056710000002226412701407047026552 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) class TestNamespaceProperties(functional.FunctionalTest): def setUp(self): super(TestNamespaceProperties, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_properties_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' resource_type_name = 'MyResourceType' resource_type_prefix = 'MyPrefix' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner", "resource_type_associations": [ { "name": 
resource_type_name, "prefix": resource_type_prefix } ] }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Property1 should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace/properties' '/property1') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a property path = self._url('/v2/metadefs/namespaces/MyNamespace/properties') headers = self._headers({'content-type': 'application/json'}) property_name = "property1" data = jsonutils.dumps( { "name": property_name, "type": "integer", "title": "property1", "description": "property1 description", "default": 100, "minimum": 100, "maximum": 30000369, "readonly": False, } ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Attempt to insert a duplicate response = requests.post(path, headers=headers, data=data) self.assertEqual(409, response.status_code) # Get the property created above path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) property_object = jsonutils.loads(response.text) self.assertEqual("integer", property_object['type']) self.assertEqual("property1", property_object['title']) self.assertEqual("property1 description", property_object[ 'description']) self.assertEqual('100', property_object['default']) self.assertEqual(100, property_object['minimum']) self.assertEqual(30000369, property_object['maximum']) # Get the property with specific resource type association path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( namespace_name, property_name, '='.join(['?resource_type', resource_type_name]))) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Get the property with prefix and specific resource type association 
property_name_with_prefix = ''.join([resource_type_prefix, property_name]) path = self._url('/v2/metadefs/namespaces/%s/properties/%s%s' % ( namespace_name, property_name_with_prefix, '='.join([ '?resource_type', resource_type_name]))) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) property_object = jsonutils.loads(response.text) self.assertEqual("integer", property_object['type']) self.assertEqual("property1", property_object['title']) self.assertEqual("property1 description", property_object[ 'description']) self.assertEqual('100', property_object['default']) self.assertEqual(100, property_object['minimum']) self.assertEqual(30000369, property_object['maximum']) self.assertFalse(property_object['readonly']) # Returned property should match the created property property_object = jsonutils.loads(response.text) checked_keys = set([ u'name', u'type', u'title', u'description', u'default', u'minimum', u'maximum', u'readonly', ]) self.assertEqual(set(property_object.keys()), checked_keys) expected_metadata_property = { "type": "integer", "title": "property1", "description": "property1 description", "default": '100', "minimum": 100, "maximum": 30000369, "readonly": False, } for key, value in expected_metadata_property.items(): self.assertEqual(property_object[key], value, key) # The property should be mutable path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) property_name = "property1-UPDATED" data = jsonutils.dumps( { "name": property_name, "type": "string", "title": "string property", "description": "desc-UPDATED", "operators": [""], "default": "value-UPDATED", "minLength": 5, "maxLength": 10, "readonly": True, } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned property should reflect the changes 
property_object = jsonutils.loads(response.text) self.assertEqual('string', property_object['type']) self.assertEqual('desc-UPDATED', property_object['description']) self.assertEqual('value-UPDATED', property_object['default']) self.assertEqual([""], property_object['operators']) self.assertEqual(5, property_object['minLength']) self.assertEqual(10, property_object['maxLength']) self.assertTrue(property_object['readonly']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual('string', property_object['type']) self.assertEqual('desc-UPDATED', property_object['description']) self.assertEqual('value-UPDATED', property_object['default']) self.assertEqual([""], property_object['operators']) self.assertEqual(5, property_object['minLength']) self.assertEqual(10, property_object['maxLength']) # Deletion of property property1 path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # property1 should not exist path = self._url('/v2/metadefs/namespaces/%s/properties/%s' % (namespace_name, property_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) glance-12.0.0/glance/tests/functional/v2/test_metadef_resourcetypes.py0000664000567000056710000002500312701407047027264 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import six import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_resource_type import ResourceType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations from glance.api.v2.model.metadef_resource_type import ResourceTypes from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LE import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class ResourceTypeController(object): def __init__(self, db_api=None, policy_enforcer=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.gateway = glance.gateway.Gateway(db_api=self.db_api, policy_enforcer=self.policy) def index(self, req): try: filters = {'namespace': None} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceType.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypes() resource_types.resource_types = resource_type_list except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(e) raise 
webob.exc.HTTPInternalServerError(e) return resource_types def show(self, req, namespace): try: filters = {'namespace': namespace} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypeAssociations() resource_types.resource_type_associations = resource_type_list except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(e) raise webob.exc.HTTPInternalServerError(e) return resource_types def create(self, req, resource_type, namespace): rs_type_factory = self.gateway.get_metadef_resource_type_factory( req.context) rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: new_resource_type = rs_type_factory.new_resource_type( namespace=namespace, **resource_type.to_dict()) rs_type_repo.add(new_resource_type) except exception.Forbidden as e: msg = (_LE("Forbidden to create resource type. 
" "Reason: %(reason)s") % {'reason': encodeutils.exception_to_unicode(e)}) LOG.error(msg) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(e) raise webob.exc.HTTPInternalServerError() return ResourceTypeAssociation.to_wsme_model(new_resource_type) def delete(self, req, namespace, resource_type): rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: filters = {} found = False filters['namespace'] = namespace db_resource_type_list = rs_type_repo.list(filters=filters) for db_resource_type in db_resource_type_list: if db_resource_type.name == resource_type: db_resource_type.delete() rs_type_repo.remove(db_resource_type) found = True if not found: raise exception.NotFound() except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: msg = (_("Failed to find resource type %(resourcetype)s to " "delete") % {'resourcetype': resource_type}) LOG.error(msg) raise webob.exc.HTTPNotFound(explanation=msg) except Exception as e: LOG.error(e) raise webob.exc.HTTPInternalServerError() class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden( explanation=encodeutils.exception_to_unicode(msg)) def create(self, request): body = 
self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) resource_type = json.fromjson(ResourceTypeAssociation, body) return dict(resource_type=resource_type) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def show(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociations, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def index(self, response, result): resource_type_json = json.tojson(ResourceTypes, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociation, result) response.status_int = 201 body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def _get_base_properties(): return { 'name': { 'type': 'string', 'description': _('Resource type names should be aligned with Heat ' 'resource types whenever possible: ' 'http://docs.openstack.org/developer/heat/' 'template_guide/openstack.html'), 'maxLength': 80, }, 'prefix': { 'type': 'string', 'description': _('Specifies the prefix to use for the given ' 'resource type. Any properties in the namespace ' 'should be prefixed with this prefix when being ' 'applied to the specified resource type. Must ' 'include prefix separator (e.g. a colon :).'), 'maxLength': 80, }, 'properties_target': { 'type': 'string', 'description': _('Some resource types allow more than one key / ' 'value pair per instance. 
For example, Cinder ' 'allows user and image metadata on volumes. Only ' 'the image properties metadata is evaluated by ' 'Nova (scheduling or drivers). This property ' 'allows a namespace target to remove the ' 'ambiguity.'), 'maxLength': 80, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of resource type association"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last resource type " "association modification"), "format": "date-time" } } def get_schema(): properties = _get_base_properties() mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() schema = glance.schema.Schema( 'resource_type_association', properties, required=mandatory_attrs, ) return schema def get_collection_schema(): resource_type_schema = get_schema() return glance.schema.CollectionSchema('resource_type_associations', resource_type_schema) def create_resource(): """ResourceTypeAssociation resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ResourceTypeController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/tests/functional/v2/registry_data_api.py0000664000567000056710000000405412701407047025321 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.db.registry.api import * # noqa from glance.common.rpc import RPCClient from glance.registry.client.v2 import api from glance.registry.client.v2 import client def patched_bulk_request(self, commands): # We add some auth headers which are typically # added by keystone. This is required when testing # without keystone, otherwise the tests fail. # We use the 'trusted-auth' deployment flavour # for testing so that these headers are interpreted # as expected (ie the same way as if keystone was # present) body = self._serializer.to_json(commands) headers = {"X-Identity-Status": "Confirmed", 'X-Roles': 'member'} if self.context.user is not None: headers['X-User-Id'] = self.context.user if self.context.tenant is not None: headers['X-Tenant-Id'] = self.context.tenant response = super(RPCClient, self).do_request('POST', self.base_path, body, headers=headers) return self._deserializer.from_json(response.read()) def client_wrapper(func): def call(context): reg_client = func(context) reg_client.context = context return reg_client return call client.RegistryClient.bulk_request = patched_bulk_request api.get_registry_client = client_wrapper(api.get_registry_client) glance-12.0.0/glance/tests/functional/v2/test_metadef_tags.py0000664000567000056710000001543712701407047025320 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import uuid from oslo_serialization import jsonutils import requests from glance.tests import functional TENANT1 = str(uuid.uuid4()) class TestMetadefTags(functional.FunctionalTest): def setUp(self): super(TestMetadefTags, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'admin', } base_headers.update(custom_headers or {}) return base_headers def test_metadata_tags_lifecycle(self): # Namespace should not exist path = self._url('/v2/metadefs/namespaces/MyNamespace') response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create a namespace path = self._url('/v2/metadefs/namespaces') headers = self._headers({'content-type': 'application/json'}) namespace_name = 'MyNamespace' data = jsonutils.dumps({ "namespace": namespace_name, "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": False, "owner": "The Test Owner"} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Metadata tag should not exist metadata_tag_name = "tag1" path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # Create the metadata tag headers = self._headers({'content-type': 'application/json'}) response = requests.post(path, headers=headers) self.assertEqual(201, response.status_code) # Get the metadata tag created above response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) metadata_tag = 
jsonutils.loads(response.text) self.assertEqual(metadata_tag_name, metadata_tag['name']) # Returned tag should match the created tag metadata_tag = jsonutils.loads(response.text) checked_keys = set([ u'name', u'created_at', u'updated_at' ]) self.assertEqual(checked_keys, set(metadata_tag.keys())) expected_metadata_tag = { "name": metadata_tag_name } # Simple key values checked_values = set([ u'name' ]) for key, value in expected_metadata_tag.items(): if(key in checked_values): self.assertEqual(metadata_tag[key], value, key) # Try to create a duplicate metadata tag headers = self._headers({'content-type': 'application/json'}) response = requests.post(path, headers=headers) self.assertEqual(409, response.status_code) # The metadata_tag should be mutable path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) media_type = 'application/json' headers = self._headers({'content-type': media_type}) metadata_tag_name = "tag1-UPDATED" data = jsonutils.dumps( { "name": metadata_tag_name } ) response = requests.put(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned metadata_tag should reflect the changes metadata_tag = jsonutils.loads(response.text) self.assertEqual('tag1-UPDATED', metadata_tag['name']) # Updates should persist across requests path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.assertEqual('tag1-UPDATED', metadata_tag['name']) # Deletion of metadata_tag_name path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # metadata_tag_name should not exist path = self._url('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadata_tag_name)) response = requests.get(path, 
headers=self._headers()) self.assertEqual(404, response.status_code) # Create multiple tags. path = self._url('/v2/metadefs/namespaces/%s/tags' % (namespace_name)) headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps( {"tags": [{"name": "tag1"}, {"name": "tag2"}, {"name": "tag3"}]} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # List out the three new tags. response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(3, len(tags)) # Attempt to create bogus duplicate tag4 data = jsonutils.dumps( {"tags": [{"name": "tag4"}, {"name": "tag5"}, {"name": "tag4"}]} ) response = requests.post(path, headers=headers, data=data) self.assertEqual(409, response.status_code) # Verify the previous 3 still exist response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(3, len(tags)) glance-12.0.0/glance/tests/functional/v2/test_schemas.py0000664000567000056710000000436312701407047024314 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils import requests from glance.tests import functional class TestSchemas(functional.FunctionalTest): def setUp(self): super(TestSchemas, self).setUp() self.cleanup() self.start_servers(**self.__dict__.copy()) def test_resource(self): # Ensure the image link works and custom properties are loaded path = 'http://%s:%d/v2/schemas/image' % ('127.0.0.1', self.api_port) response = requests.get(path) self.assertEqual(200, response.status_code) image_schema = jsonutils.loads(response.text) expected = set([ 'id', 'name', 'visibility', 'checksum', 'created_at', 'updated_at', 'tags', 'size', 'virtual_size', 'owner', 'container_format', 'disk_format', 'self', 'file', 'status', 'schema', 'direct_url', 'locations', 'min_ram', 'min_disk', 'protected', ]) self.assertEqual(expected, set(image_schema['properties'].keys())) # Ensure the images link works and agrees with the image schema path = 'http://%s:%d/v2/schemas/images' % ('127.0.0.1', self.api_port) response = requests.get(path) self.assertEqual(200, response.status_code) images_schema = jsonutils.loads(response.text) item_schema = images_schema['properties']['images']['items'] self.assertEqual(item_schema, image_schema) self.stop_servers() glance-12.0.0/glance/tests/functional/v2/test_images.py0000664000567000056710000046553512701407051024145 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import signal import uuid from oslo_serialization import jsonutils import requests import six # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from six.moves import urllib from glance.tests import functional from glance.tests import utils as test_utils TENANT1 = str(uuid.uuid4()) TENANT2 = str(uuid.uuid4()) TENANT3 = str(uuid.uuid4()) TENANT4 = str(uuid.uuid4()) class TestImages(functional.FunctionalTest): def setUp(self): super(TestImages, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.api_server.data_api = 'glance.db.sqlalchemy.api' for i in range(3): ret = test_utils.start_http_server("foo_image_id%d" % i, "foo_image%d" % i) setattr(self, 'http_server%d_pid' % i, ret[0]) setattr(self, 'http_port%d' % i, ret[1]) def tearDown(self): for i in range(3): pid = getattr(self, 'http_server%d_pid' % i, None) if pid: os.kill(pid, signal.SIGKILL) super(TestImages, self).tearDown() def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def test_v1_none_properties_v2(self): self.api_server.deployment_flavor = 'noauth' self.api_server.use_user_token = True self.api_server.send_identity_credentials = True self.registry_server.deployment_flavor = '' # Image list should be empty self.start_servers(**self.__dict__.copy()) # Create an image (with two deployer-defined properties) path = self._url('/v1/images') headers = self._headers({'content-type': 'application/octet-stream'}) headers.update(test_utils.minimal_headers('image-1')) # NOTE(flaper87): Sending empty string, the server will use None headers['x-image-meta-property-my_empty_prop'] = '' response = 
requests.post(path, headers=headers) self.assertEqual(201, response.status_code) data = jsonutils.loads(response.text) image_id = data['image']['id'] # NOTE(flaper87): Get the image using V2 and verify # the returned value for `my_empty_prop` is an empty # string. path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('', image['my_empty_prop']) self.stop_servers() def test_not_authenticated_in_registry_on_ops(self): # https://bugs.launchpad.net/glance/+bug/1451850 # this configuration guarantees that authentication succeeds in # glance-api and fails in glance-registry if no token is passed self.api_server.deployment_flavor = '' # make sure that request will reach registry self.api_server.data_api = 'glance.db.registry.api' self.registry_server.deployment_flavor = 'fakeauth' self.start_servers(**self.__dict__.copy()) headers = {'content-type': 'application/json'} image = {'name': 'image', 'type': 'kernel', 'disk_format': 'qcow2', 'container_format': 'bare'} # image create should return 401 response = requests.post(self._url('/v2/images'), headers=headers, data=jsonutils.dumps(image)) self.assertEqual(401, response.status_code) # image list should return 401 response = requests.get(self._url('/v2/images')) self.assertEqual(401, response.status_code) # image show should return 401 response = requests.get(self._url('/v2/images/someimageid')) self.assertEqual(401, response.status_code) # image update should return 401 ops = [{'op': 'replace', 'path': '/protected', 'value': False}] media_type = 'application/openstack-images-v2.1-json-patch' response = requests.patch(self._url('/v2/images/someimageid'), headers={'content-type': media_type}, data=jsonutils.dumps(ops)) self.assertEqual(401, response.status_code) # image delete should return 401 response = requests.delete(self._url('/v2/images/someimageid')) self.assertEqual(401, 
response.status_code) self.stop_servers() def test_image_lifecycle(self): # Image list should be empty self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'abc': 'xyz'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image_location_header = response.headers['Location'] # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] checked_keys = set([ u'status', u'name', u'tags', u'created_at', u'updated_at', u'visibility', u'self', u'protected', u'id', u'file', u'min_disk', u'foo', u'abc', u'type', u'min_ram', u'schema', u'disk_format', u'container_format', u'owner', u'checksum', u'size', u'virtual_size', u'locations', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'foo': 'bar', 'abc': 'xyz', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have one entry path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, 
images[0]['id']) # Create another image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', 'bar': 'foo', 'disk_format': 'aki', 'container_format': 'aki', 'xyz': 'abc'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image2_id = image['id'] checked_keys = set([ u'status', u'name', u'tags', u'created_at', u'updated_at', u'visibility', u'self', u'protected', u'id', u'file', u'min_disk', u'bar', u'xyz', u'type', u'min_ram', u'schema', u'disk_format', u'container_format', u'owner', u'checksum', u'size', u'virtual_size', u'locations', ]) self.assertEqual(checked_keys, set(image.keys())) expected_image = { 'status': 'queued', 'name': 'image-2', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image2_id, 'protected': False, 'file': '/v2/images/%s/file' % image2_id, 'min_disk': 0, 'bar': 'foo', 'xyz': 'abc', 'type': 'kernel', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Image list should now have two entries path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # Image list should list only image-2 as image-1 doesn't contain the # property 'bar' path = self._url('/v2/images?bar=foo') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Image list should list only 
image-1 as image-2 doesn't contain the # property 'foo' path = self._url('/v2/images?foo=bar') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # The "changes-since" filter shouldn't work on glance v2 path = self._url('/v2/images?changes-since=20001007T10:10:10') response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) path = self._url('/v2/images?changes-since=aaa') response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) # Image list should list only image-1 based on the filter # 'foo=bar&abc=xyz' path = self._url('/v2/images?foo=bar&abc=xyz') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # Image list should list only image-2 based on the filter # 'bar=foo&xyz=abc' path = self._url('/v2/images?bar=foo&xyz=abc') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Image list should not list anything as the filter 'foo=baz&abc=xyz' # is not satisfied by either images path = self._url('/v2/images?foo=baz&abc=xyz') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Get the image using the returned Location header response = requests.get(image_location_header, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(image_id, image['id']) 
self.assertIsNone(image['checksum']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) self.assertEqual('bar', image['foo']) self.assertFalse(image['protected']) self.assertEqual('kernel', image['type']) self.assertTrue(image['created_at']) self.assertTrue(image['updated_at']) self.assertEqual(image['updated_at'], image['created_at']) # The URI file:// should return a 400 rather than a 500 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) url = ('file://') changes = [{ 'op': 'add', 'path': '/locations/-', 'value': { 'url': url, 'metadata': {} } }] data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(400, response.status_code, response.text) # The image should be mutable, including adding and removing properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'image-2'}, {'op': 'replace', 'path': '/disk_format', 'value': 'vhd'}, {'op': 'replace', 'path': '/container_format', 'value': 'ami'}, {'op': 'replace', 'path': '/foo', 'value': 'baz'}, {'op': 'add', 'path': '/ping', 'value': 'pong'}, {'op': 'replace', 'path': '/protected', 'value': True}, {'op': 'remove', 'path': '/type'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual('image-2', image['name']) self.assertEqual('vhd', image['disk_format']) self.assertEqual('baz', image['foo']) self.assertEqual('pong', image['ping']) self.assertTrue(image['protected']) self.assertNotIn('type', image, response.text) # Adding 11 image properties should fail since configured limit is 10 
path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) changes = [] for i in range(11): changes.append({'op': 'add', 'path': '/ping%i' % i, 'value': 'pong'}) data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(413, response.status_code, response.text) # Adding 3 image locations should fail since configured limit is 2 path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) changes = [] for i in range(3): url = ('http://127.0.0.1:%s/foo_image' % getattr(self, 'http_port%d' % i)) changes.append({'op': 'add', 'path': '/locations/-', 'value': {'url': url, 'metadata': {}}, }) data = jsonutils.dumps(changes) response = requests.patch(path, headers=headers, data=data) self.assertEqual(413, response.status_code, response.text) # Ensure the v2.0 json-patch content type is accepted path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.0-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([{'add': '/ding', 'value': 'dong'}]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual('dong', image['ding']) # Updates should persist across requests path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(image_id, image['id']) self.assertEqual('image-2', image['name']) self.assertEqual('baz', image['foo']) self.assertEqual('pong', image['ping']) self.assertTrue(image['protected']) self.assertNotIn('type', image, response.text) # Try to download 
data before its uploaded path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(204, response.status_code) def _verify_image_checksum_and_status(checksum, status): # Checksum should be populated and status should be active path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(checksum, image['checksum']) self.assertEqual(status, image['status']) # Upload some image data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) expected_checksum = '8f113e38d28a79a5a451b16048cc2b72' _verify_image_checksum_and_status(expected_checksum, 'active') # `disk_format` and `container_format` cannot # be replaced when the image is active. immutable_paths = ['/disk_format', '/container_format'] media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) path = self._url('/v2/images/%s' % image_id) for immutable_path in immutable_paths: data = jsonutils.dumps([ {'op': 'replace', 'path': immutable_path, 'value': 'ari'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # Try to download the data that was just uploaded path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.assertEqual(expected_checksum, response.headers['Content-MD5']) self.assertEqual('ZZZZZ', response.text) # Uploading duplicate data should be rejected with a 409. The # original data should remain untouched. 
path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='XXX') self.assertEqual(409, response.status_code) _verify_image_checksum_and_status(expected_checksum, 'active') # Ensure the size is updated to reflect the data uploaded path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.assertEqual(5, jsonutils.loads(response.text)['size']) # Should be able to deactivate image path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(204, response.status_code) # Change the image to public so TENANT2 can see it path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.0-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([{"replace": "/visibility", "value": "public"}]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Tennant2 should get Forbidden when deactivating the public image path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers( {'X-Tenant-Id': TENANT2})) self.assertEqual(403, response.status_code) # Tennant2 should get Forbidden when reactivating the public image path = self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers( {'X-Tenant-Id': TENANT2})) self.assertEqual(403, response.status_code) # Deactivating a deactivated image succeeds (no-op) path = self._url('/v2/images/%s/actions/deactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(204, response.status_code) # Can't download a deactivated image path = self._url('/v2/images/%s/file' % 
image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(403, response.status_code) # Deactivated image should still be in a listing path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # Should be able to reactivate a deactivated image path = self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(204, response.status_code) # Reactivating an active image succeeds (no-op) path = self._url('/v2/images/%s/actions/reactivate' % image_id) response = requests.post(path, data={}, headers=self._headers()) self.assertEqual(204, response.status_code) # Deletion should not work on protected images path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(403, response.status_code) # Unprotect image for deletion path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [{'op': 'replace', 'path': '/protected', 'value': False}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Deletion should work. 
Deleting image-1 path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) # And neither should its data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers() response = requests.get(path, headers=headers) self.assertEqual(404, response.status_code) # Image list should now contain just image-2 path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # Deleting image-2 should work path = self._url('/v2/images/%s' % image2_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # Image list should now be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create image that tries to send True should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = 'true' response = requests.post(path, headers=headers, data=data) self.assertEqual(400, response.status_code) # Create image that tries to send a string should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = '"hello"' response = requests.post(path, headers=headers, data=data) self.assertEqual(400, response.status_code) # Create image that tries to send 123 should return 400 path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = '123' 
response = requests.post(path, headers=headers, data=data) self.assertEqual(400, response.status_code) self.stop_servers() def test_update_readonly_prop(self): self.start_servers(**self.__dict__.copy()) # Create an image (with two deployer-defined properties) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1'}) response = requests.post(path, headers=headers, data=data) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) props = ['/id', '/file', '/location', '/schema', '/self'] for prop in props: doc = [{'op': 'replace', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) for prop in props: doc = [{'op': 'remove', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) for prop in props: doc = [{'op': 'add', 'path': prop, 'value': 'value1'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) self.stop_servers() def test_methods_that_dont_accept_illegal_bodies(self): # Check images can be reached self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) # Test all the schemas schema_urls = [ '/v2/schemas/images', '/v2/schemas/image', '/v2/schemas/members', '/v2/schemas/member', ] for value in schema_urls: path = self._url(value) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) # Create image for use with tests path 
= self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] test_urls = [ ('/v2/images/%s', 'get'), ('/v2/images/%s/actions/deactivate', 'post'), ('/v2/images/%s/actions/reactivate', 'post'), ('/v2/images/%s/tags/mytag', 'put'), ('/v2/images/%s/tags/mytag', 'delete'), ('/v2/images/%s/members', 'get'), ('/v2/images/%s/file', 'get'), ('/v2/images/%s', 'delete'), ] for link, method in test_urls: path = self._url(link % image_id) data = jsonutils.dumps(["body"]) response = getattr(requests, method)( path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) # DELETE /images/imgid without legal json path = self._url('/v2/images/%s' % image_id) data = '{"hello"]' response = requests.delete(path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) # POST /images/imgid/members path = self._url('/v2/images/%s/members' % image_id) data = jsonutils.dumps({'member': TENANT3}) response = requests.post(path, headers=self._headers(), data=data) self.assertEqual(200, response.status_code) # GET /images/imgid/members/memid path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) data = jsonutils.dumps(["body"]) response = requests.get(path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) # DELETE /images/imgid/members/memid path = self._url('/v2/images/%s/members/%s' % (image_id, TENANT3)) data = jsonutils.dumps(["body"]) response = requests.delete(path, headers=self._headers(), data=data) self.assertEqual(400, response.status_code) self.stop_servers() def test_download_random_access(self): self.start_servers(**self.__dict__.copy()) # Create another image (with two deployer-defined properties) path = self._url('/v2/images') headers = 
self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-2', 'type': 'kernel', 'bar': 'foo', 'disk_format': 'aki', 'container_format': 'aki', 'xyz': 'abc'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # Upload data to image image_data = 'Z' * 15 path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data=image_data) self.assertEqual(204, response.status_code) result_body = '' for x in range(15): # NOTE(flaper87): Read just 1 byte. Content-Range is # 0-indexed and it specifies the first byte to read # and the last byte to read. content_range = 'bytes %s-%s/15' % (x, x) headers = self._headers({'Content-Range': content_range}) path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=headers) result_body += response.text self.assertEqual(result_body, image_data) self.stop_servers() def test_download_policy_when_cache_is_not_enabled(self): rules = {'context_is_admin': 'role:admin', 'default': '', 'add_image': '', 'get_image': '', 'modify_image': '', 'upload_image': '', 'delete_image': '', 'download_image': '!'} self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 
'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in six.iteritems(expected_image): self.assertEqual(value, image[key], key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Get an image should fail path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_image_not_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 
'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in six.iteritems(expected_image): self.assertEqual(value, image[key], key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Get an image should fail path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': '_member_'}) response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_image_allowed_using_restricted_policy(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 
'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in six.iteritems(expected_image): self.assertEqual(value, value, key) # Upload data to image path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Get an image should be allowed path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream', 'X-Roles': 'member'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_download_image_raises_service_unavailable(self): """Test image download returns HTTPServiceUnavailable.""" self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get image id image = jsonutils.loads(response.text) image_id = image['id'] # Update image locations via PATCH path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': 
media_type}) http_server_pid, http_port = test_utils.start_http_server(image_id, "image-1") values = [{'url': 'http://127.0.0.1:%s/image-1' % http_port, 'metadata': {'idx': '0'}}] doc = [{'op': 'replace', 'path': '/locations', 'value': values}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) # Download an image should work path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Stop http server used to update image location os.kill(http_server_pid, signal.SIGKILL) # Download an image should raise HTTPServiceUnavailable path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(503, response.status_code) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_image_modification_works_for_owning_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "tenant:%(owner)s", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 
'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers['content-type'] = media_type del headers['X-Roles'] data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'new-name'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) self.stop_servers() def test_image_modification_fails_on_mismatched_tenant_ids(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "'A-Fake-Tenant-Id':%(owner)s", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted" } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers['content-type'] = media_type del headers['X-Roles'] data = jsonutils.dumps([ {'op': 'replace', 'path': '/name', 'value': 'new-name'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) self.stop_servers() def test_member_additions_works_for_owning_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", 
"upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "tenant:%(owner)s", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Get the image's members resource path = self._url('/v2/images/%s/members' % image_id) body = jsonutils.dumps({'member': TENANT3}) del headers['X-Roles'] response = requests.post(path, headers=headers, data=body) self.assertEqual(200, response.status_code) self.stop_servers() def test_image_additions_works_only_for_specific_tenant_id(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "'{0}':%(owner)s".format(TENANT1), "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) headers['X-Tenant-Id'] = TENANT2 response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) self.stop_servers() def 
test_owning_tenant_id_can_retrieve_image_information(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "tenant:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Remove the admin role del headers['X-Roles'] # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Can retrieve the image as TENANT1 path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Can retrieve the image's members as TENANT1 path = self._url('/v2/images/%s/members' % image_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) headers['X-Tenant-Id'] = TENANT2 response = requests.get(path, headers=headers) self.assertEqual(403, response.status_code) self.stop_servers() def test_owning_tenant_can_publicize_image(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "publicize_image": "tenant:%(owner)s", "get_image": "tenant:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = 
self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT1, }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) def test_owning_tenant_can_delete_image(self): rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "publicize_image": "tenant:%(owner)s", "get_image": "tenant:%(owner)s", "modify_image": "", "upload_image": "", "get_image_location": "", "delete_image": "", "restricted": "not ('aki':%(container_format)s and role:_member_)", "download_image": "role:admin or rule:restricted", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=headers) self.assertEqual(204, response.status_code) def test_list_show_ok_when_get_location_allowed_for_admins(self): self.api_server.show_image_direct_url = True self.api_server.show_multiple_locations = True # setup context to allow a list locations by admin only 
rules = { "context_is_admin": "role:admin", "default": "", "add_image": "", "get_image": "", "modify_image": "", "upload_image": "", "get_image_location": "role:admin", "delete_image": "", "restricted": "", "download_image": "", "add_member": "", } self.set_policy_rules(rules) self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Tenant-Id': TENANT1}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image's ID image = jsonutils.loads(response.text) image_id = image['id'] # Can retrieve the image as TENANT1 path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Can list images as TENANT1 path = self._url('/v2/images') response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) self.stop_servers() def test_image_size_cap(self): self.api_server.image_size_cap = 128 self.start_servers(**self.__dict__.copy()) # create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-size-cap-test-image', 'type': 'kernel', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # try to populate it with oversized data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) class StreamSim(object): # Using a one-shot iterator to force chunked transfer in the PUT # request def __init__(self, size): self.size = size def __iter__(self): yield 'Z' * self.size response = requests.put(path, headers=headers, 
data=StreamSim( self.api_server.image_size_cap + 1)) self.assertEqual(413, response.status_code) # hashlib.md5('Z'*129).hexdigest() # == '76522d28cb4418f12704dfa7acd6e7ee' # If the image has this checksum, it means that the whole stream was # accepted and written to the store, which should not be the case. path = self._url('/v2/images/{0}'.format(image_id)) headers = self._headers({'content-type': 'application/json'}) response = requests.get(path, headers=headers) image_checksum = jsonutils.loads(response.text).get('checksum') self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee') def test_permissions(self): self.start_servers(**self.__dict__.copy()) # Create an image that belongs to TENANT1 path = self._url('/v2/images') headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'raw', 'container_format': 'bare'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image_id = jsonutils.loads(response.text)['id'] # Upload some image data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # TENANT1 should see the image in their list path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(image_id, images[0]['id']) # TENANT1 should be able to access the image directly path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) # TENANT2 should not see the image in their list path = self._url('/v2/images') headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) 
images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # TENANT2 should not be able to access the image directly path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.get(path, headers=headers) self.assertEqual(404, response.status_code) # TENANT2 should not be able to modify the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT2, }) doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(404, response.status_code) # TENANT2 should not be able to delete the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT2}) response = requests.delete(path, headers=headers) self.assertEqual(404, response.status_code) # Publicize the image as an admin of TENANT1 path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Roles': 'admin', }) doc = [{'op': 'replace', 'path': '/visibility', 'value': 'public'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) # TENANT3 should now see the image in their list path = self._url('/v2/images') headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(image_id, images[0]['id']) # TENANT3 should also be able to access the image directly path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # TENANT3 still should not be able to modify the 
image path = self._url('/v2/images/%s' % image_id) headers = self._headers({ 'Content-Type': 'application/openstack-images-v2.1-json-patch', 'X-Tenant-Id': TENANT3, }) doc = [{'op': 'replace', 'path': '/name', 'value': 'image-2'}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # TENANT3 should not be able to delete the image, either path = self._url('/v2/images/%s' % image_id) headers = self._headers({'X-Tenant-Id': TENANT3}) response = requests.delete(path, headers=headers) self.assertEqual(403, response.status_code) # Image data should still be present after the failed delete path = self._url('/v2/images/%s/file' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.assertEqual(response.text, 'ZZZZZ') self.stop_servers() def test_property_protections_with_roles(self): # Enable property protection self.api_server.property_protection_file = self.property_file_roles self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image for role member with extra props # Raises 403 since user is not allowed to set 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # Create an image for role member without 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 
'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity should have 'x_owner_foo' image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_owner_foo': 'o_s_bar', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Create an image for role spl_role with extra props path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'spl_role'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'spl_create_prop': 'create_bar', 'spl_create_prop_policy': 'create_policy_bar', 'spl_read_prop': 'read_bar', 'spl_update_prop': 'update_bar', 'spl_delete_prop': 'delete_bar', 'spl_delete_empty_prop': ''}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # Attempt to replace, add and remove properties which are forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'spl_role'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/spl_read_prop', 'value': 'r'}, {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) # Attempt to replace, add and remove properties which are forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': 
media_type, 'X-Roles': 'spl_role'}) data = jsonutils.dumps([ {'op': 'add', 'path': '/spl_new_prop', 'value': 'new'}, {'op': 'remove', 'path': '/spl_create_prop'}, {'op': 'remove', 'path': '/spl_delete_prop'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) # Attempt to replace properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'spl_role'}) data = jsonutils.dumps([ # Updating an empty property to verify bug #1332103. {'op': 'replace', 'path': '/spl_update_prop', 'value': ''}, {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_update_prop' has update permission for spl_role # hence the value has changed self.assertEqual('u', image['spl_update_prop']) # Attempt to remove properties path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'spl_role'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/spl_delete_prop'}, # Deleting an empty property to verify bug #1332103. 
{'op': 'remove', 'path': '/spl_delete_empty_prop'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_delete_prop' and 'spl_delete_empty_prop' have delete # permission for spl_role hence the property has been deleted self.assertNotIn('spl_delete_prop', image.keys()) self.assertNotIn('spl_delete_empty_prop', image.keys()) # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_property_protections_with_policies(self): # Enable property protection self.api_server.property_protection_file = self.property_file_policies self.api_server.property_protection_rule_format = 'policies' self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image for role member with extra props # Raises 403 since user is not allowed to set 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) data = jsonutils.dumps({'name': 'image-1', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'x_owner_foo': 'o_s_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # Create an image for role member without 'foo' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'member'}) 
data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Create an image for role spl_role with extra props path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'spl_role, admin'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'spl_creator_policy': 'creator_bar', 'spl_default_policy': 'default_bar'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('creator_bar', image['spl_creator_policy']) self.assertEqual('default_bar', image['spl_default_policy']) # Attempt to replace a property which is permitted path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ # Updating an empty property to verify bug #1332103. 
{'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'r'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_creator_policy' has update permission for admin # hence the value has changed self.assertEqual('r', image['spl_creator_policy']) # Attempt to replace a property which is forbidden path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'spl_role'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/spl_creator_policy', 'value': 'z'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) # Attempt to read properties path = self._url('/v2/images/%s' % image_id) headers = self._headers({'content-type': media_type, 'X-Roles': 'random_role'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) # 'random_role' is allowed read 'spl_default_policy'. self.assertEqual(image['spl_default_policy'], 'default_bar') # 'random_role' is forbidden to read 'spl_creator_policy'. self.assertNotIn('spl_creator_policy', image) # Attempt to replace and remove properties which are permitted path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ # Deleting an empty property to verify bug #1332103. 
{'op': 'replace', 'path': '/spl_creator_policy', 'value': ''}, {'op': 'remove', 'path': '/spl_creator_policy'}, ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) # 'spl_creator_policy' has delete permission for admin # hence the value has been deleted self.assertNotIn('spl_creator_policy', image) # Attempt to read a property that is permitted path = self._url('/v2/images/%s' % image_id) headers = self._headers({'content-type': media_type, 'X-Roles': 'random_role'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) # Returned image entity should reflect the changes image = jsonutils.loads(response.text) self.assertEqual(image['spl_default_policy'], 'default_bar') # Image Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # This image should be no longer be directly accessible path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(404, response.status_code) self.stop_servers() def test_property_protections_special_chars_roles(self): # Enable property protection self.api_server.property_protection_file = self.property_file_roles self.start_servers(**self.__dict__.copy()) # Verify both admin and unknown role can create properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 
'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_admin': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_joe_soap': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Verify both admin and unknown role can read properties marked with # '@' headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can update properties marked with # '@' path = self._url('/v2/images/%s' % image_id) media_type = 
'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('2', image['x_all_permitted_joe_soap']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('3', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can delete properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_a': '1', 'x_all_permitted_b': '2' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_a'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_a', image.keys()) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' 
headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_b'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_b', image.keys()) # Verify neither admin nor unknown role can create a property protected # with '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # Verify neither admin nor unknown role can read properties marked with # '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_read': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) # Verify neither admin nor unknown role can update properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_update': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('1', image['x_none_update']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(409, response.status_code, response.text) # Verify neither admin nor unknown role can delete properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_delete': '1', }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(409, response.status_code, response.text) self.stop_servers() def test_property_protections_special_chars_policies(self): # Enable property protection self.api_server.property_protection_file = self.property_file_policies self.api_server.property_protection_rule_format = 'policies' self.start_servers(**self.__dict__.copy()) # Verify both admin and unknown role can create properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 
'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_admin': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] expected_image = { 'status': 'queued', 'name': 'image-1', 'tags': [], 'visibility': 'private', 'self': '/v2/images/%s' % image_id, 'protected': False, 'file': '/v2/images/%s/file' % image_id, 'min_disk': 0, 'x_all_permitted_joe_soap': '1', 'min_ram': 0, 'schema': '/v2/schemas/image', } for key, value in expected_image.items(): self.assertEqual(value, image[key], key) # Verify both admin and unknown role can read properties marked with # '@' headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual('1', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can update properties marked with # '@' path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': 
media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('2', image['x_all_permitted_joe_soap']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_all_permitted_joe_soap', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertEqual('3', image['x_all_permitted_joe_soap']) # Verify both admin and unknown role can delete properties marked with # '@' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_all_permitted_a': '1', 'x_all_permitted_b': '2' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_a'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_a', image.keys()) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = 
jsonutils.dumps([ {'op': 'remove', 'path': '/x_all_permitted_b'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) image = jsonutils.loads(response.text) self.assertNotIn('x_all_permitted_b', image.keys()) # Verify neither admin nor unknown role can create a property protected # with '!' path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_admin': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_permitted_joe_soap': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(403, response.status_code) # Verify neither admin nor unknown role can read properties marked with # '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_read': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) headers = self._headers({'content-type': 'application/json', 'X-Roles': 'joe_soap'}) path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('x_none_read', image.keys()) # Verify neither admin nor unknown role can update properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_update': '1' }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('1', image['x_none_update']) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '2'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'replace', 'path': '/x_none_update', 'value': '3'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(409, response.status_code, response.text) # Verify neither admin nor unknown role can delete properties marked # with '!' 
path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json', 'X-Roles': 'admin'}) data = jsonutils.dumps({ 'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki', 'x_none_delete': '1', }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'admin'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(403, response.status_code, response.text) path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type, 'X-Roles': 'joe_soap'}) data = jsonutils.dumps([ {'op': 'remove', 'path': '/x_none_delete'} ]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(409, response.status_code, response.text) self.stop_servers() def test_tag_lifecycle(self): self.start_servers(**self.__dict__.copy()) # Create an image with a tag - duplicate should be ignored path = self._url('/v2/images') headers = self._headers({'Content-Type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'tags': ['sniff', 'sniff']}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image_id = jsonutils.loads(response.text)['id'] # Image should show a list with a single tag path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff'], tags) # Delete all tags for tag in tags: path = self._url('/v2/images/%s/tags/%s' % (image_id, tag)) response = 
requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # Update image with too many tags via PUT # Configured limit is 10 tags for i in range(10): path = self._url('/v2/images/%s/tags/foo%i' % (image_id, i)) response = requests.put(path, headers=self._headers()) self.assertEqual(204, response.status_code) # 11th tag should fail path = self._url('/v2/images/%s/tags/fail_me' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(413, response.status_code) # Make sure the 11th tag was not added path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(10, len(tags)) # Update image tags via PATCH path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [ { 'op': 'replace', 'path': '/tags', 'value': ['foo'], }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) # Update image with too many tags via PATCH # Configured limit is 10 tags path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) tags = ['foo%d' % i for i in range(11)] doc = [ { 'op': 'replace', 'path': '/tags', 'value': tags, }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(413, response.status_code) # Tags should not have changed since request was over limit path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['foo'], tags) # Update image with duplicate tag - it should be ignored path = 
self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) doc = [ { 'op': 'replace', 'path': '/tags', 'value': ['sniff', 'snozz', 'snozz'], }, ] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # Image should show the appropriate tags path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # Attempt to tag the image with a duplicate should be ignored path = self._url('/v2/images/%s/tags/snozz' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(204, response.status_code) # Create another more complex tag path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.put(path, headers=self._headers()) self.assertEqual(204, response.status_code) # Double-check that the tags container on the image is populated path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['gabe@example.com', 'sniff', 'snozz'], sorted(tags)) # Query images by single tag path = self._url('/v2/images?tag=sniff') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by multiple tags path = self._url('/v2/images?tag=sniff&tag=snozz') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = 
jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by tag and other attributes path = self._url('/v2/images?tag=sniff&status=queued') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual('image-1', images[0]['name']) # Query images by tag and a nonexistent tag path = self._url('/v2/images?tag=sniff&tag=fake') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # The tag should be deletable path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # List of tags should reflect the deletion path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) # Deleting the same tag should return a 404 path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(404, response.status_code) # The tags won't be able to query the images after deleting path = self._url('/v2/images?tag=gabe%%40example.com') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Try to add a tag that is too long big_tag = 'a' * 300 path = self._url('/v2/images/%s/tags/%s' % (image_id, big_tag)) response = requests.put(path, headers=self._headers()) self.assertEqual(400, response.status_code) # Tags should not have changed since request was over 
limit path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) tags = jsonutils.loads(response.text)['tags'] self.assertEqual(['sniff', 'snozz'], sorted(tags)) self.stop_servers() def test_images_container(self): # Image list should be empty and no next link should be present self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] first = jsonutils.loads(response.text)['first'] self.assertEqual(0, len(images)) self.assertNotIn('next', jsonutils.loads(response.text)) self.assertEqual('/v2/images', first) # Create 7 images images = [] fixtures = [ {'name': 'image-3', 'type': 'kernel', 'ping': 'pong', 'container_format': 'ami', 'disk_format': 'ami'}, {'name': 'image-4', 'type': 'kernel', 'ping': 'pong', 'container_format': 'bare', 'disk_format': 'ami'}, {'name': 'image-1', 'type': 'kernel', 'ping': 'pong'}, {'name': 'image-3', 'type': 'ramdisk', 'ping': 'pong'}, {'name': 'image-2', 'type': 'kernel', 'ping': 'ding'}, {'name': 'image-3', 'type': 'kernel', 'ping': 'pong'}, {'name': 'image-2,image-5', 'type': 'kernel', 'ping': 'pong'}, ] path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) for fixture in fixtures: data = jsonutils.dumps(fixture) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) images.append(jsonutils.loads(response.text)) # Image list should contain 7 images path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(7, len(body['images'])) self.assertEqual('/v2/images', body['first']) self.assertNotIn('next', jsonutils.loads(response.text)) # Image list filters by created_at time 
url_template = '/v2/images?created_at=lt:%s' path = self._url(url_template % images[0]['created_at']) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['images'])) self.assertEqual(url_template % images[0]['created_at'], urllib.parse.unquote(body['first'])) # Image list filters by updated_at time url_template = '/v2/images?updated_at=lt:%s' path = self._url(url_template % images[2]['updated_at']) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(3, len(body['images'])) self.assertEqual(url_template % images[2]['updated_at'], urllib.parse.unquote(body['first'])) # Image list filters by updated_at and created time with invalid value url_template = '/v2/images?%s=lt:invalid_value' for filter in ['updated_at', 'created_at']: path = self._url(url_template % filter) response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) # Image list filters by updated_at and created_at with invalid operator url_template = '/v2/images?%s=invalid_operator:2015-11-19T12:24:02Z' for filter in ['updated_at', 'created_at']: path = self._url(url_template % filter) response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) # Image list filters by non-'URL encoding' value path = self._url('/v2/images?name=%FF') response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) # Image list filters by name with in operator url_template = '/v2/images?name=in:%s' filter_value = 'image-1,image-2' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(3, len(body['images'])) # Image list filters by container_format 
with in operator url_template = '/v2/images?container_format=in:%s' filter_value = 'bare,ami' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(2, len(body['images'])) # Image list filters by disk_format with in operator url_template = '/v2/images?disk_format=in:%s' filter_value = 'bare,ami,iso' path = self._url(url_template % filter_value) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertGreaterEqual(2, len(body['images'])) # Begin pagination after the first image template_url = ('/v2/images?limit=2&sort_dir=asc&sort_key=name' '&marker=%s&type=kernel&ping=pong') path = self._url(template_url % images[2]['id']) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(2, len(body['images'])) response_ids = [image['id'] for image in body['images']] self.assertEqual([images[6]['id'], images[0]['id']], response_ids) # Continue pagination using next link from previous request path = self._url(body['next']) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(2, len(body['images'])) response_ids = [image['id'] for image in body['images']] self.assertEqual([images[5]['id'], images[1]['id']], response_ids) # Continue pagination - expect no results path = self._url(body['next']) response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['images'])) # Delete first image path = self._url('/v2/images/%s' % images[0]['id']) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) # 
Ensure bad request for using a deleted image as marker path = self._url('/v2/images?marker=%s' % images[0]['id']) response = requests.get(path, headers=self._headers()) self.assertEqual(400, response.status_code) self.stop_servers() def test_image_visibility_to_different_users(self): self.cleanup() self.api_server.deployment_flavor = 'fakeauth' self.registry_server.deployment_flavor = 'fakeauth' kwargs = self.__dict__.copy() kwargs['use_user_token'] = True self.start_servers(**kwargs) owners = ['admin', 'tenant1', 'tenant2', 'none'] visibilities = ['public', 'private'] for owner in owners: for visibility in visibilities: path = self._url('/v2/images') headers = self._headers({ 'content-type': 'application/json', 'X-Auth-Token': 'createuser:%s:admin' % owner, }) data = jsonutils.dumps({ 'name': '%s-%s' % (owner, visibility), 'visibility': visibility, }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) def list_images(tenant, role='', visibility=None): auth_token = 'user:%s:%s' % (tenant, role) headers = {'X-Auth-Token': auth_token} path = self._url('/v2/images') if visibility is not None: path += '?visibility=%s' % visibility response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) return jsonutils.loads(response.text)['images'] # 1. Known user sees public and their own images images = list_images('tenant1') self.assertEqual(5, len(images)) for image in images: self.assertTrue(image['visibility'] == 'public' or 'tenant1' in image['name']) # 2. Known user, visibility=public, sees all public images images = list_images('tenant1', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 3. 
Known user, visibility=private, sees only their private image images = list_images('tenant1', visibility='private') self.assertEqual(1, len(images)) image = images[0] self.assertEqual('private', image['visibility']) self.assertIn('tenant1', image['name']) # 4. Unknown user sees only public images images = list_images('none') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 5. Unknown user, visibility=public, sees only public images images = list_images('none', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 6. Unknown user, visibility=private, sees no images images = list_images('none', visibility='private') self.assertEqual(0, len(images)) # 7. Unknown admin sees all images images = list_images('none', role='admin') self.assertEqual(8, len(images)) # 8. Unknown admin, visibility=public, shows only public images images = list_images('none', role='admin', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 9. Unknown admin, visibility=private, sees only private images images = list_images('none', role='admin', visibility='private') self.assertEqual(4, len(images)) for image in images: self.assertEqual('private', image['visibility']) # 10. Known admin sees all images images = list_images('admin', role='admin') self.assertEqual(8, len(images)) # 11. Known admin, visibility=public, sees all public images images = list_images('admin', role='admin', visibility='public') self.assertEqual(4, len(images)) for image in images: self.assertEqual('public', image['visibility']) # 12. 
Known admin, visibility=private, sees all private images images = list_images('admin', role='admin', visibility='private') self.assertEqual(4, len(images)) for image in images: self.assertEqual('private', image['visibility']) self.stop_servers() def test_update_locations(self): self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) # Update locations for the queued image path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) url = 'http://127.0.0.1:%s/foo_image' % self.http_port0 data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', 'value': [{'url': url, 'metadata': {}}] }]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code, response.text) # The image size should be updated path = self._url('/v2/images/%s' % image_id) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertEqual(10, image['size']) def test_update_locations_with_restricted_sources(self): self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'disk_format': 'aki', 'container_format': 'aki'}) 
response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Returned image entity should have a generated id and status image = jsonutils.loads(response.text) image_id = image['id'] self.assertEqual('queued', image['status']) self.assertIsNone(image['size']) self.assertIsNone(image['virtual_size']) # Update locations for the queued image path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', 'value': [{'url': 'file:///foo_image', 'metadata': {}}] }]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(400, response.status_code, response.text) data = jsonutils.dumps([{'op': 'replace', 'path': '/locations', 'value': [{'url': 'swift+config:///foo_image', 'metadata': {}}] }]) response = requests.patch(path, headers=headers, data=data) self.assertEqual(400, response.status_code, response.text) class TestImagesWithRegistry(TestImages): def setUp(self): super(TestImagesWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 'trusted-auth' class TestImageDirectURLVisibility(functional.FunctionalTest): def setUp(self): super(TestImageDirectURLVisibility, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def test_v2_not_enabled(self): self.api_server.enable_v2_api = False self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') 
response = requests.get(path, headers=self._headers()) self.assertEqual(300, response.status_code) self.stop_servers() def test_v2_enabled(self): self.api_server.enable_v2_api = True self.start_servers(**self.__dict__.copy()) path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) self.stop_servers() def test_image_direct_url_visible(self): self.api_server.show_image_direct_url = True self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki', 'visibility': 'public'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image id image = jsonutils.loads(response.text) image_id = image['id'] # Image direct_url should not be visible before location is set path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('direct_url', image) # Upload some image data, setting the image location path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Image direct_url should be visible path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) 
self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('direct_url', image) # Image direct_url should be visible to non-owner, non-admin user path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json', 'X-Tenant-Id': TENANT2}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('direct_url', image) # Image direct_url should be visible in a list path = self._url('/v2/images') headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text)['images'][0] self.assertIn('direct_url', image) self.stop_servers() def test_image_multiple_location_url_visible(self): self.api_server.show_multiple_locations = True self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image id image = jsonutils.loads(response.text) image_id = image['id'] # Image locations should not be visible before location is set path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('locations', image) self.assertEqual([], image["locations"]) # Upload some image data, setting the image location path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') 
self.assertEqual(204, response.status_code) # Image locations should be visible path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('locations', image) loc = image['locations'] self.assertGreater(len(loc), 0) loc = loc[0] self.assertIn('url', loc) self.assertIn('metadata', loc) self.stop_servers() def test_image_direct_url_not_visible(self): self.api_server.show_image_direct_url = False self.start_servers(**self.__dict__.copy()) # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image id image = jsonutils.loads(response.text) image_id = image['id'] # Upload some image data, setting the image location path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) response = requests.put(path, headers=headers, data='ZZZZZ') self.assertEqual(204, response.status_code) # Image direct_url should not be visible path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertNotIn('direct_url', image) # Image direct_url should not be visible in a list path = self._url('/v2/images') headers = 
self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text)['images'][0] self.assertNotIn('direct_url', image) self.stop_servers() class TestImageDirectURLVisibilityWithRegistry(TestImageDirectURLVisibility): def setUp(self): super(TestImageDirectURLVisibilityWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 'trusted-auth' class TestImageLocationSelectionStrategy(functional.FunctionalTest): def setUp(self): super(TestImageLocationSelectionStrategy, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' for i in range(3): ret = test_utils.start_http_server("foo_image_id%d" % i, "foo_image%d" % i) setattr(self, 'http_server%d_pid' % i, ret[0]) setattr(self, 'http_port%d' % i, ret[1]) def tearDown(self): for i in range(3): pid = getattr(self, 'http_server%d_pid' % i, None) if pid: os.kill(pid, signal.SIGKILL) super(TestImageLocationSelectionStrategy, self).tearDown() def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def test_image_locations_with_order_strategy(self): self.api_server.show_image_direct_url = True self.api_server.show_multiple_locations = True self.image_location_quota = 10 self.api_server.location_strategy = 'location_order' preference = "http, swift, filesystem" self.api_server.store_type_location_strategy_preference = preference self.start_servers(**self.__dict__.copy()) # Create an image path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = 
jsonutils.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) # Get the image id image = jsonutils.loads(response.text) image_id = image['id'] # Image locations should not be visible before location is set path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('locations', image) self.assertEqual([], image["locations"]) # Update image locations via PATCH path = self._url('/v2/images/%s' % image_id) media_type = 'application/openstack-images-v2.1-json-patch' headers = self._headers({'content-type': media_type}) values = [{'url': 'http://127.0.0.1:%s/foo_image' % self.http_port0, 'metadata': {}}, {'url': 'http://127.0.0.1:%s/foo_image' % self.http_port1, 'metadata': {}}] doc = [{'op': 'replace', 'path': '/locations', 'value': values}] data = jsonutils.dumps(doc) response = requests.patch(path, headers=headers, data=data) self.assertEqual(200, response.status_code) # Image locations should be visible path = self._url('/v2/images/%s' % image_id) headers = self._headers({'Content-Type': 'application/json'}) response = requests.get(path, headers=headers) self.assertEqual(200, response.status_code) image = jsonutils.loads(response.text) self.assertIn('locations', image) self.assertEqual(values, image['locations']) self.assertIn('direct_url', image) self.assertEqual(values[0]['url'], image['direct_url']) self.stop_servers() class TestImageLocationSelectionStrategyWithRegistry( TestImageLocationSelectionStrategy): def setUp(self): super(TestImageLocationSelectionStrategyWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 
'trusted-auth' class TestImageMembers(functional.FunctionalTest): def setUp(self): super(TestImageMembers, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'fakeauth' self.registry_server.deployment_flavor = 'fakeauth' self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def test_image_member_lifecycle(self): def get_header(tenant, role=''): auth_token = 'user:%s:%s' % (tenant, role) headers = {'X-Auth-Token': auth_token} return headers # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) owners = ['tenant1', 'tenant2', 'admin'] visibilities = ['public', 'private'] image_fixture = [] for owner in owners: for visibility in visibilities: path = self._url('/v2/images') headers = self._headers({ 'content-type': 'application/json', 'X-Auth-Token': 'createuser:%s:admin' % owner, }) data = jsonutils.dumps({ 'name': '%s-%s' % (owner, visibility), 'visibility': visibility, }) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image_fixture.append(jsonutils.loads(response.text)) # Image list should contain 4 images for tenant1 path = self._url('/v2/images') response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Image list should contain 3 images for TENANT3 path = self._url('/v2/images') response = 
requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(3, len(images)) # Add Image member for tenant1-private image path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) body = jsonutils.dumps({'member': TENANT3}) response = requests.post(path, headers=get_header('tenant1'), data=body) self.assertEqual(200, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[1]['id'], image_member['image_id']) self.assertEqual(TENANT3, image_member['member_id']) self.assertIn('created_at', image_member) self.assertIn('updated_at', image_member) self.assertEqual('pending', image_member['status']) # Image list should contain 3 images for TENANT3 path = self._url('/v2/images') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(3, len(images)) # Image list should contain 0 shared images for TENANT3 # because default is accepted path = self._url('/v2/images?visibility=shared') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 4 images for TENANT3 with status pending path = self._url('/v2/images?member_status=pending') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Image list should contain 4 images for TENANT3 with status all path = self._url('/v2/images?member_status=all') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Image list should contain 1 image for TENANT3 with 
status pending # and visibility shared path = self._url('/v2/images?member_status=pending&visibility=shared') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(1, len(images)) self.assertEqual(images[0]['name'], 'tenant1-private') # Image list should contain 0 image for TENANT3 with status rejected # and visibility shared path = self._url('/v2/images?member_status=rejected&visibility=shared') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 0 image for TENANT3 with status accepted # and visibility shared path = self._url('/v2/images?member_status=accepted&visibility=shared') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image list should contain 0 image for TENANT3 with status accepted # and visibility private path = self._url('/v2/images?visibility=private') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Image tenant2-private's image members list should contain no members path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) response = requests.get(path, headers=get_header('tenant2')) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['members'])) # Tenant 1, who is the owner cannot change status of image member path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header('tenant1'), data=body) self.assertEqual(403, 
response.status_code) # Tenant 1, who is the owner can get status of its own image member path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual('pending', body['status']) self.assertEqual(image_fixture[1]['id'], body['image_id']) self.assertEqual(TENANT3, body['member_id']) # Tenant 3, who is the member can get status of its own status path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual('pending', body['status']) self.assertEqual(image_fixture[1]['id'], body['image_id']) self.assertEqual(TENANT3, body['member_id']) # Tenant 2, who not the owner cannot get status of image member path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) response = requests.get(path, headers=get_header('tenant2')) self.assertEqual(404, response.status_code) # Tenant 3 can change status of image member path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header(TENANT3), data=body) self.assertEqual(200, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[1]['id'], image_member['image_id']) self.assertEqual(TENANT3, image_member['member_id']) self.assertEqual('accepted', image_member['status']) # Image list should contain 4 images for TENANT3 because status is # accepted path = self._url('/v2/images') response = requests.get(path, headers=get_header(TENANT3)) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(4, len(images)) # Tenant 3 invalid status change path = 
self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) body = jsonutils.dumps({'status': 'invalid-status'}) response = requests.put(path, headers=get_header(TENANT3), data=body) self.assertEqual(400, response.status_code) # Owner cannot change status of image path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) body = jsonutils.dumps({'status': 'accepted'}) response = requests.put(path, headers=get_header('tenant1'), data=body) self.assertEqual(403, response.status_code) # Add Image member for tenant2-private image path = self._url('/v2/images/%s/members' % image_fixture[3]['id']) body = jsonutils.dumps({'member': TENANT4}) response = requests.post(path, headers=get_header('tenant2'), data=body) self.assertEqual(200, response.status_code) image_member = jsonutils.loads(response.text) self.assertEqual(image_fixture[3]['id'], image_member['image_id']) self.assertEqual(TENANT4, image_member['member_id']) self.assertIn('created_at', image_member) self.assertIn('updated_at', image_member) # Add Image member to public image path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) body = jsonutils.dumps({'member': TENANT2}) response = requests.post(path, headers=get_header('tenant1'), data=body) self.assertEqual(403, response.status_code) # Image tenant1-private's members list should contain 1 member path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(1, len(body['members'])) # Admin can see any members path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) response = requests.get(path, headers=get_header('tenant1', 'admin')) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(1, len(body['members'])) # Image members not found for private image not owned by TENANT 1 path = 
self._url('/v2/images/%s/members' % image_fixture[3]['id']) response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(404, response.status_code) # Image members forbidden for public image path = self._url('/v2/images/%s/members' % image_fixture[0]['id']) response = requests.get(path, headers=get_header('tenant1')) self.assertIn("Public images do not have members", response.text) self.assertEqual(403, response.status_code) # Image Member Cannot delete Image membership path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) response = requests.delete(path, headers=get_header(TENANT3)) self.assertEqual(403, response.status_code) # Delete Image member path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'], TENANT3)) response = requests.delete(path, headers=get_header('tenant1')) self.assertEqual(204, response.status_code) # Now the image has no members path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(200, response.status_code) body = jsonutils.loads(response.text) self.assertEqual(0, len(body['members'])) # Adding 11 image members should fail since configured limit is 10 path = self._url('/v2/images/%s/members' % image_fixture[1]['id']) for i in range(10): body = jsonutils.dumps({'member': str(uuid.uuid4())}) response = requests.post(path, headers=get_header('tenant1'), data=body) self.assertEqual(200, response.status_code) body = jsonutils.dumps({'member': str(uuid.uuid4())}) response = requests.post(path, headers=get_header('tenant1'), data=body) self.assertEqual(413, response.status_code) # Get Image member should return not found for public image path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'], TENANT3)) response = requests.get(path, headers=get_header('tenant1')) self.assertEqual(404, response.status_code) # Delete Image member should return forbidden for public image path = 
self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'], TENANT3)) response = requests.delete(path, headers=get_header('tenant1')) self.assertEqual(403, response.status_code) self.stop_servers() class TestImageMembersWithRegistry(TestImageMembers): def setUp(self): super(TestImageMembersWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 'trusted-auth' class TestQuotas(functional.FunctionalTest): def setUp(self): super(TestQuotas, self).setUp() self.cleanup() self.api_server.deployment_flavor = 'noauth' self.registry_server.deployment_flavor = 'trusted-auth' self.user_storage_quota = 100 self.start_servers(**self.__dict__.copy()) def _url(self, path): return 'http://127.0.0.1:%d%s' % (self.api_port, path) def _headers(self, custom_headers=None): base_headers = { 'X-Identity-Status': 'Confirmed', 'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96', 'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e', 'X-Tenant-Id': TENANT1, 'X-Roles': 'member', } base_headers.update(custom_headers or {}) return base_headers def _upload_image_test(self, data_src, expected_status): # Image list should be empty path = self._url('/v2/images') response = requests.get(path, headers=self._headers()) self.assertEqual(200, response.status_code) images = jsonutils.loads(response.text)['images'] self.assertEqual(0, len(images)) # Create an image (with a deployer-defined property) path = self._url('/v2/images') headers = self._headers({'content-type': 'application/json'}) data = jsonutils.dumps({'name': 'testimg', 'type': 'kernel', 'foo': 'bar', 'disk_format': 'aki', 'container_format': 'aki'}) response = requests.post(path, headers=headers, data=data) self.assertEqual(201, response.status_code) image = jsonutils.loads(response.text) image_id = image['id'] # upload data path = self._url('/v2/images/%s/file' % image_id) headers = self._headers({'Content-Type': 'application/octet-stream'}) 
response = requests.put(path, headers=headers, data=data_src) self.assertEqual(expected_status, response.status_code) # Deletion should work path = self._url('/v2/images/%s' % image_id) response = requests.delete(path, headers=self._headers()) self.assertEqual(204, response.status_code) def test_image_upload_under_quota(self): data = 'x' * (self.user_storage_quota - 1) self._upload_image_test(data, 204) def test_image_upload_exceed_quota(self): data = 'x' * (self.user_storage_quota + 1) self._upload_image_test(data, 413) def test_chunked_image_upload_under_quota(self): def data_gen(): yield 'x' * (self.user_storage_quota - 1) self._upload_image_test(data_gen(), 204) def test_chunked_image_upload_exceed_quota(self): def data_gen(): yield 'x' * (self.user_storage_quota + 1) self._upload_image_test(data_gen(), 413) class TestQuotasWithRegistry(TestQuotas): def setUp(self): super(TestQuotasWithRegistry, self).setUp() self.api_server.data_api = ( 'glance.tests.functional.v2.registry_data_api') self.registry_server.deployment_flavor = 'trusted-auth' glance-12.0.0/glance/tests/functional/test_cors_middleware.py0000664000567000056710000000576412701407047025513 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests cors middleware.""" import httplib2 from glance.tests import functional class TestCORSMiddleware(functional.FunctionalTest): '''Provide a basic smoke test to ensure CORS middleware is active. 
The tests below provide minimal confirmation that the CORS middleware is active, and may be configured. For comprehensive tests, please consult the test suite in oslo_middleware. ''' def setUp(self): super(TestCORSMiddleware, self).setUp() # Cleanup is handled in teardown of the parent class. self.start_servers(**self.__dict__.copy()) self.http = httplib2.Http() self.api_path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) def test_valid_cors_options_request(self): (r_headers, content) = self.http.request( self.api_path, 'OPTIONS', headers={ 'Origin': 'http://valid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(r_headers.status, 200) self.assertIn('access-control-allow-origin', r_headers) self.assertEqual('http://valid.example.com', r_headers['access-control-allow-origin']) def test_invalid_cors_options_request(self): (r_headers, content) = self.http.request( self.api_path, 'OPTIONS', headers={ 'Origin': 'http://invalid.example.com', 'Access-Control-Request-Method': 'GET' }) self.assertEqual(r_headers.status, 200) self.assertNotIn('access-control-allow-origin', r_headers) def test_valid_cors_get_request(self): (r_headers, content) = self.http.request( self.api_path, 'GET', headers={ 'Origin': 'http://valid.example.com' }) self.assertEqual(r_headers.status, 200) self.assertIn('access-control-allow-origin', r_headers) self.assertEqual('http://valid.example.com', r_headers['access-control-allow-origin']) def test_invalid_cors_get_request(self): (r_headers, content) = self.http.request( self.api_path, 'GET', headers={ 'Origin': 'http://invalid.example.com' }) self.assertEqual(r_headers.status, 200) self.assertNotIn('access-control-allow-origin', r_headers) glance-12.0.0/glance/tests/functional/v1/0000775000567000056710000000000012701407204021244 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/v1/test_misc.py0000664000567000056710000001076012701407047023621 0ustar jenkinsjenkins00000000000000# Copyright 2011 
OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import os import httplib2 from oslo_serialization import jsonutils from oslo_utils import units from glance.tests import functional from glance.tests.utils import minimal_headers FIVE_KB = 5 * units.Ki FIVE_GB = 5 * units.Gi class TestMiscellaneous(functional.FunctionalTest): """Some random tests for various bugs and stuff""" def setUp(self): super(TestMiscellaneous, self).setUp() # NOTE(sirp): This is needed in case we are running the tests under an # environment in which OS_AUTH_STRATEGY=keystone. The test server we # spin up won't have keystone support, so we need to switch to the # NoAuth strategy. os.environ['OS_AUTH_STRATEGY'] = 'noauth' os.environ['OS_AUTH_URL'] = '' def test_api_response_when_image_deleted_from_filesystem(self): """ A test for LP bug #781410 -- glance should fail more gracefully on requests for images that have been removed from the fs """ self.cleanup() self.start_servers() # 1. POST /images with public image named Image1 # attribute and no custom properties. 
Verify a 200 OK is returned image_data = "*" * FIVE_KB headers = minimal_headers('Image1') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 2. REMOVE the image from the filesystem image_path = "%s/images/%s" % (self.test_dir, data['image']['id']) os.remove(image_path) # 3. HEAD /images/1 # Verify image found now path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, data['image']['id']) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) # 4. GET /images/1 # Verify the api throws the appropriate 404 error path = "http://%s:%d/v1/images/1" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) self.stop_servers() def test_exception_not_eaten_from_registry_to_api(self): """ A test for LP bug #704854 -- Exception thrown by registry server is consumed by API server. We start both servers daemonized. 
We then use Glance API to try adding an image that does not meet validation requirements on the registry server and test that the error returned from the API server is appropriate """ self.cleanup() self.start_servers() api_port = self.api_port path = 'http://127.0.0.1:%d/v1/images' % api_port http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Name': 'ImageName', 'X-Image-Meta-Disk-Format': 'Invalid', } ignored, content = http.request(path, 'POST', headers=headers) self.assertIn('Invalid disk format', content, "Could not find 'Invalid disk format' " "in output: %s" % content) self.stop_servers() glance-12.0.0/glance/tests/functional/v1/test_multiprocessing.py0000664000567000056710000000504512701407047026115 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time import httplib2 import psutil # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests import functional from glance.tests.utils import execute class TestMultiprocessing(functional.FunctionalTest): """Functional tests for the bin/glance CLI tool""" def setUp(self): self.workers = 2 super(TestMultiprocessing, self).setUp() def test_multiprocessing(self): """Spin up the api servers with multiprocessing on""" self.cleanup() self.start_servers(**self.__dict__.copy()) path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(b'{"images": []}', content) self.stop_servers() def _get_children(self): api_pid = self.api_server.process_pid process = psutil.Process(api_pid) children = process.get_children() pids = [str(child.pid) for child in children] return pids def test_interrupt_avoids_respawn_storm(self): """ Ensure an interrupt signal does not cause a respawn storm. See bug #978130 """ self.start_servers(**self.__dict__.copy()) children = self._get_children() cmd = "kill -INT %s" % ' '.join(children) execute(cmd, raise_error=True) for _ in range(9): # Yeah. This totally isn't a race condition. Randomly fails # set at 0.05. Works most of the time at 0.10 time.sleep(0.10) # ensure number of children hasn't grown self.assertGreaterEqual(len(children), len(self._get_children())) for child in self._get_children(): # ensure no new children spawned self.assertIn(child, children, child) self.stop_servers() glance-12.0.0/glance/tests/functional/v1/__init__.py0000664000567000056710000000000012701407047023350 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/tests/functional/v1/test_api.py0000664000567000056710000011013412701407051023426 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Functional test case that utilizes httplib2 against the API server""" import hashlib import httplib2 import sys from oslo_serialization import jsonutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests import functional from glance.tests.utils import minimal_headers from glance.tests.utils import skip_if_disabled FIVE_KB = 5 * units.Ki FIVE_GB = 5 * units.Gi class TestApi(functional.FunctionalTest): """Functional tests using httplib2 against the API server""" def _check_image_create(self, headers, status=201, image_data="*" * FIVE_KB): # performs image_create request, checks the response and returns # content http = httplib2.Http() path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) response, content = http.request( path, 'POST', headers=headers, body=image_data) self.assertEqual(status, response.status) return content def test_checksum_32_chars_at_image_create(self): self.cleanup() self.start_servers(**self.__dict__.copy()) headers = minimal_headers('Image1') image_data = "*" * FIVE_KB # checksum can be no longer that 32 characters (String(32)) headers['X-Image-Meta-Checksum'] = 'x' * 42 content = self._check_image_create(headers, 400) self.assertIn("Invalid checksum", content) # test positive case as well headers['X-Image-Meta-Checksum'] = hashlib.md5(image_data).hexdigest() self._check_image_create(headers) def 
test_param_int_too_large_at_create(self): # currently 2 params min_disk/min_ram can cause DBError on save self.cleanup() self.start_servers(**self.__dict__.copy()) # Integer field can't be greater than max 8-byte signed integer for param in ['min_disk', 'min_ram']: headers = minimal_headers('Image1') # check that long numbers result in 400 headers['X-Image-Meta-%s' % param] = str(sys.maxint + 1) content = self._check_image_create(headers, 400) self.assertIn("'%s' value out of range" % param, content) # check that integers over 4 byte result in 400 headers['X-Image-Meta-%s' % param] = str(2 ** 31) content = self._check_image_create(headers, 400) self.assertIn("'%s' value out of range" % param, content) # verify positive case as well headers['X-Image-Meta-%s' % param] = str((2 ** 31) - 1) self._check_image_create(headers) @skip_if_disabled def test_get_head_simple_post(self): """ We test the following sequential series of actions: 0. GET /images - Verify no public images 1. GET /images/detail - Verify no public images 2. POST /images with public image named Image1 and no custom properties - Verify 201 returned 3. HEAD image - Verify HTTP headers have correct information we just added 4. GET image - Verify all information on image we just added is correct 5. GET /images - Verify the image we just added is returned 6. GET /images/detail - Verify the image we just added is returned 7. PUT image with custom properties of "distro" and "arch" - Verify 200 returned 8. PUT image with too many custom properties - Verify 413 returned 9. GET image - Verify updated information about image was stored 10. PUT image - Remove a previously existing property. 11. PUT image - Add a previously deleted property. 12. PUT image/members/member1 - Add member1 to image 13. PUT image/members/member2 - Add member2 to image 14. GET image/members - List image members 15. DELETE image/members/member1 - Delete image member1 16. 
PUT image/members - Attempt to replace members with an overlimit amount 17. PUT image/members/member11 - Attempt to add a member while at limit 18. POST /images with another public image named Image2 - attribute and three custom properties, "distro", "arch" & "foo" - Verify a 200 OK is returned 19. HEAD image2 - Verify image2 found now 20. GET /images - Verify 2 public images 21. GET /images with filter on user-defined property "distro". - Verify both images are returned 22. GET /images with filter on user-defined property 'distro' but - with non-existent value. Verify no images are returned 23. GET /images with filter on non-existent user-defined property - "boo". Verify no images are returned 24. GET /images with filter 'arch=i386' - Verify only image2 is returned 25. GET /images with filter 'arch=x86_64' - Verify only image1 is returned 26. GET /images with filter 'foo=bar' - Verify only image2 is returned 27. DELETE image1 - Delete image 28. GET image/members - List deleted image members 29. PUT image/members/member2 - Update existing member2 of deleted image 30. PUT image/members/member3 - Add member3 to deleted image 31. DELETE image/members/member2 - Delete member2 from deleted image 32. DELETE image2 - Delete image 33. GET /images - Verify no images are listed """ self.cleanup() self.start_servers(**self.__dict__.copy()) # 0. GET /images # Verify no public images path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) # 1. GET /images/detail # Verify no public images path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual('{"images": []}', content) # 2. POST /images with public image named Image1 # attribute and no custom properties. 
Verify a 200 OK is returned image_data = "*" * FIVE_KB headers = minimal_headers('Image1') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 3. HEAD image # Verify image found now path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) # 4. GET image # Verify all information on image we just added is correct path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_image_headers = { 'x-image-meta-id': image_id, 'x-image-meta-name': 'Image1', 'x-image-meta-is_public': 'True', 'x-image-meta-status': 'active', 'x-image-meta-disk_format': 'raw', 'x-image-meta-container_format': 'ovf', 'x-image-meta-size': str(FIVE_KB)} expected_std_headers = { 'content-length': str(FIVE_KB), 'content-type': 'application/octet-stream'} for expected_key, expected_value in expected_image_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, response[expected_key])) for expected_key, expected_value in expected_std_headers.items(): self.assertEqual(expected_value, response[expected_key], "For key '%s' expected header value '%s'. 
" "Got '%s'" % (expected_key, expected_value, response[expected_key])) self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) # 5. GET /images # Verify one public image path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"images": [ {"container_format": "ovf", "disk_format": "raw", "id": image_id, "name": "Image1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 6. GET /images/detail # Verify image and all its metadata path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_image = { "status": "active", "name": "Image1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": image_id, "is_public": True, "deleted_at": None, "properties": {}, "size": 5120} image = jsonutils.loads(content) for expected_key, expected_value in expected_image.items(): self.assertEqual(expected_value, image['images'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, image['images'][0][expected_key])) # 7. PUT image with custom properties of "distro" and "arch" # Verify 200 returned headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', 'X-Image-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual("x86_64", data['image']['properties']['arch']) self.assertEqual("Ubuntu", data['image']['properties']['distro']) # 8. 
PUT image with too many custom properties # Verify 413 returned headers = {} for i in range(11): # configured limit is 10 headers['X-Image-Meta-Property-foo%d' % i] = 'bar' path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(413, response.status) # 9. GET /images/detail # Verify image and all its metadata path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_image = { "status": "active", "name": "Image1", "deleted": False, "container_format": "ovf", "disk_format": "raw", "id": image_id, "is_public": True, "deleted_at": None, "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'}, "size": 5120} image = jsonutils.loads(content) for expected_key, expected_value in expected_image.items(): self.assertEqual(expected_value, image['images'][0][expected_key], "For key '%s' expected header value '%s'. " "Got '%s'" % (expected_key, expected_value, image['images'][0][expected_key])) # 10. PUT image and remove a previously existing property. headers = {'X-Image-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['images'][0] self.assertEqual(1, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) # 11. PUT image and add a previously deleted property. 
headers = {'X-Image-Meta-Property-Distro': 'Ubuntu', 'X-Image-Meta-Property-Arch': 'x86_64'} path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) data = jsonutils.loads(content) path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content)['images'][0] self.assertEqual(2, len(data['properties'])) self.assertEqual("x86_64", data['properties']['arch']) self.assertEqual("Ubuntu", data['properties']['distro']) self.assertNotEqual(data['created_at'], data['updated_at']) # 12. Add member to image path = ("http://%s:%d/v1/images/%s/members/pattieblack" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(204, response.status) # 13. Add member to image path = ("http://%s:%d/v1/images/%s/members/pattiewhite" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(204, response.status) # 14. List image members path = ("http://%s:%d/v1/images/%s/members" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) data = jsonutils.loads(content) self.assertEqual(2, len(data['members'])) self.assertEqual('pattieblack', data['members'][0]['member_id']) self.assertEqual('pattiewhite', data['members'][1]['member_id']) # 15. Delete image member path = ("http://%s:%d/v1/images/%s/members/pattieblack" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(204, response.status) # 16. 
Attempt to replace members with an overlimit amount # Adding 11 image members should fail since configured limit is 10 path = ("http://%s:%d/v1/images/%s/members" % ("127.0.0.1", self.api_port, image_id)) memberships = [] for i in range(11): member_id = "foo%d" % i memberships.append(dict(member_id=member_id)) http = httplib2.Http() body = jsonutils.dumps(dict(memberships=memberships)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(413, response.status) # 17. Attempt to add a member while at limit # Adding an 11th member should fail since configured limit is 10 path = ("http://%s:%d/v1/images/%s/members" % ("127.0.0.1", self.api_port, image_id)) memberships = [] for i in range(10): member_id = "foo%d" % i memberships.append(dict(member_id=member_id)) http = httplib2.Http() body = jsonutils.dumps(dict(memberships=memberships)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(204, response.status) path = ("http://%s:%d/v1/images/%s/members/fail_me" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(413, response.status) # 18. POST /images with another public image named Image2 # attribute and three custom properties, "distro", "arch" & "foo". 
# Verify a 200 OK is returned image_data = "*" * FIVE_KB headers = minimal_headers('Image2') headers['X-Image-Meta-Property-Distro'] = 'Ubuntu' headers['X-Image-Meta-Property-Arch'] = 'i386' headers['X-Image-Meta-Property-foo'] = 'bar' path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) image2_id = data['image']['id'] self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image2", data['image']['name']) self.assertTrue(data['image']['is_public']) self.assertEqual('Ubuntu', data['image']['properties']['distro']) self.assertEqual('i386', data['image']['properties']['arch']) self.assertEqual('bar', data['image']['properties']['foo']) # 19. HEAD image2 # Verify image2 found now path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image2_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image2", response['x-image-meta-name']) # 20. GET /images # Verify 2 public images path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # 21. GET /images with filter on user-defined property 'distro'. 
# Verify both images are returned path = "http://%s:%d/v1/images?property-distro=Ubuntu" % ( "127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(2, len(images)) self.assertEqual(image2_id, images[0]['id']) self.assertEqual(image_id, images[1]['id']) # 22. GET /images with filter on user-defined property 'distro' but # with non-existent value. Verify no images are returned path = "http://%s:%d/v1/images?property-distro=fedora" % ( "127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(0, len(images)) # 23. GET /images with filter on non-existent user-defined property # 'boo'. Verify no images are returned path = "http://%s:%d/v1/images?property-boo=bar" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(0, len(images)) # 24. GET /images with filter 'arch=i386' # Verify only image2 is returned path = "http://%s:%d/v1/images?property-arch=i386" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # 25. GET /images with filter 'arch=x86_64' # Verify only image1 is returned path = "http://%s:%d/v1/images?property-arch=x86_64" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(1, len(images)) self.assertEqual(image_id, images[0]['id']) # 26. 
GET /images with filter 'foo=bar' # Verify only image2 is returned path = "http://%s:%d/v1/images?property-foo=bar" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(1, len(images)) self.assertEqual(image2_id, images[0]['id']) # 27. DELETE image1 path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 28. Try to list members of deleted image path = ("http://%s:%d/v1/images/%s/members" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) # 29. Try to update member of deleted image path = ("http://%s:%d/v1/images/%s/members" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}] body = jsonutils.dumps(dict(memberships=fixture)) response, content = http.request(path, 'PUT', body=body) self.assertEqual(404, response.status) # 30. Try to add member to deleted image path = ("http://%s:%d/v1/images/%s/members/chickenpattie" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'PUT') self.assertEqual(404, response.status) # 31. Try to delete member of deleted image path = ("http://%s:%d/v1/images/%s/members/pattieblack" % ("127.0.0.1", self.api_port, image_id)) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(404, response.status) # 32. DELETE image2 path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image2_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 33. 
GET /images # Verify no images are listed path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) images = jsonutils.loads(content)['images'] self.assertEqual(0, len(images)) # 34. HEAD /images/detail path = "http://%s:%d/v1/images/detail" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(405, response.status) self.assertEqual('GET', response.get('allow')) self.stop_servers() def test_download_non_exists_image_raises_http_forbidden(self): """ We test the following sequential series of actions: 0. POST /images with public image named Image1 and no custom properties - Verify 201 returned 1. HEAD image - Verify HTTP headers have correct information we just added 2. GET image - Verify all information on image we just added is correct 3. DELETE image1 - Delete the newly added image 4. GET image - Verify that 403 HTTPForbidden exception is raised prior to 404 HTTPNotFound """ self.cleanup() self.start_servers(**self.__dict__.copy()) image_data = "*" * FIVE_KB headers = minimal_headers('Image1') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 1. HEAD image # Verify image found now path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) # 2. 
GET /images # Verify one public image path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"images": [ {"container_format": "ovf", "disk_format": "raw", "id": image_id, "name": "Image1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 3. DELETE image1 path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 4. GET image # Verify that 403 HTTPForbidden exception is raised prior to # 404 HTTPNotFound rules = {"download_image": '!'} self.set_policy_rules(rules) path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(403, response.status) self.stop_servers() def test_download_non_exists_image_raises_http_not_found(self): """ We test the following sequential series of actions: 0. POST /images with public image named Image1 and no custom properties - Verify 201 returned 1. HEAD image - Verify HTTP headers have correct information we just added 2. GET image - Verify all information on image we just added is correct 3. DELETE image1 - Delete the newly added image 4. 
GET image - Verify that 404 HTTPNotFound exception is raised """ self.cleanup() self.start_servers(**self.__dict__.copy()) image_data = "*" * FIVE_KB headers = minimal_headers('Image1') path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status) data = jsonutils.loads(content) image_id = data['image']['id'] self.assertEqual(hashlib.md5(image_data).hexdigest(), data['image']['checksum']) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("Image1", data['image']['name']) self.assertTrue(data['image']['is_public']) # 1. HEAD image # Verify image found now path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual("Image1", response['x-image-meta-name']) # 2. GET /images # Verify one public image path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) expected_result = {"images": [ {"container_format": "ovf", "disk_format": "raw", "id": image_id, "name": "Image1", "checksum": "c2e5db72bd7fd153f53ede5da5a06de3", "size": 5120}]} self.assertEqual(expected_result, jsonutils.loads(content)) # 3. DELETE image1 path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # 4. 
GET image # Verify that 404 HTTPNotFound exception is raised path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(404, response.status) self.stop_servers() def test_status_cannot_be_manipulated_directly(self): self.cleanup() self.start_servers(**self.__dict__.copy()) headers = minimal_headers('Image1') # Create a 'queued' image http = httplib2.Http() headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Disk-Format': 'raw', 'X-Image-Meta-Container-Format': 'bare'} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) response, content = http.request(path, 'POST', headers=headers, body=None) self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('queued', image['status']) # Ensure status of 'queued' image can't be changed path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, image['id']) http = httplib2.Http() headers = {'X-Image-Meta-Status': 'active'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(403, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('queued', response['x-image-meta-status']) # We allow 'setting' to the same status http = httplib2.Http() headers = {'X-Image-Meta-Status': 'queued'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('queued', response['x-image-meta-status']) # Make image active http = httplib2.Http() headers = {'Content-Type': 'application/octet-stream'} response, content = http.request(path, 'PUT', headers=headers, body='data') self.assertEqual(200, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) # Ensure status of 'active' image can't be 
changed http = httplib2.Http() headers = {'X-Image-Meta-Status': 'queued'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(403, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('active', response['x-image-meta-status']) # We allow 'setting' to the same status http = httplib2.Http() headers = {'X-Image-Meta-Status': 'active'} response, content = http.request(path, 'PUT', headers=headers) self.assertEqual(200, response.status) response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) self.assertEqual('active', response['x-image-meta-status']) # Create a 'queued' image, ensure 'status' header is ignored http = httplib2.Http() path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Status': 'active'} response, content = http.request(path, 'POST', headers=headers, body=None) self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('queued', image['status']) # Create an 'active' image, ensure 'status' header is ignored http = httplib2.Http() path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Disk-Format': 'raw', 'X-Image-Meta-Status': 'queued', 'X-Image-Meta-Container-Format': 'bare'} response, content = http.request(path, 'POST', headers=headers, body='data') self.assertEqual(201, response.status) image = jsonutils.loads(content)['image'] self.assertEqual('active', image['status']) self.stop_servers() glance-12.0.0/glance/tests/functional/v1/test_copy_to_file.py0000664000567000056710000002650012701407047025340 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2012 Red Hat, Inc # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests copying images to a Glance API server which uses a filesystem- based storage backend. """ import hashlib import tempfile import time import httplib2 from oslo_serialization import jsonutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.tests import functional from glance.tests.functional.store_utils import get_http_uri from glance.tests.functional.store_utils import setup_http from glance.tests.utils import skip_if_disabled FIVE_KB = 5 * units.Ki class TestCopyToFile(functional.FunctionalTest): """ Functional tests for copying images from the HTTP storage backend to file """ def _do_test_copy_from(self, from_store, get_uri): """ Ensure we can copy from an external image in from_store. 
""" self.cleanup() self.start_servers(**self.__dict__.copy()) setup_http(self) # POST /images with public image to be stored in from_store, # to stand in for the 'external' image image_data = "*" * FIVE_KB headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Name': 'external', 'X-Image-Meta-Store': from_store, 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True'} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers, body=image_data) self.assertEqual(201, response.status, content) data = jsonutils.loads(content) original_image_id = data['image']['id'] copy_from = get_uri(self, original_image_id) # POST /images with public image copied from_store (to file) headers = {'X-Image-Meta-Name': 'copied', 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True', 'X-Glance-API-Copy-From': copy_from} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers) self.assertEqual(201, response.status, content) data = jsonutils.loads(content) copy_image_id = data['image']['id'] self.assertNotEqual(copy_image_id, original_image_id) # GET image and make sure image content is as expected path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, copy_image_id) def _await_status(expected_status): for i in range(100): time.sleep(0.01) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) if response['x-image-meta-status'] == expected_status: return self.fail('unexpected image status %s' % response['x-image-meta-status']) _await_status('active') http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(str(FIVE_KB), response['content-length']) 
self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("copied", data['image']['name']) # DELETE original image path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, original_image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) # GET image again to make sure the existence of the original # image in from_store is not depended on path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, copy_image_id) http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(str(FIVE_KB), response['content-length']) self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) self.assertEqual(FIVE_KB, data['image']['size']) self.assertEqual("copied", data['image']['name']) # DELETE copied image path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, copy_image_id) http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) self.stop_servers() @skip_if_disabled def test_copy_from_http_store(self): """ Ensure we can copy from an external image in HTTP store. 
""" self._do_test_copy_from('file', get_http_uri) @skip_if_disabled def test_copy_from_http_exists(self): """Ensure we can copy from an external image in HTTP.""" self.cleanup() self.start_servers(**self.__dict__.copy()) setup_http(self) copy_from = get_http_uri(self, 'foobar') # POST /images with public image copied from HTTP (to file) headers = {'X-Image-Meta-Name': 'copied', 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True', 'X-Glance-API-Copy-From': copy_from} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers) self.assertEqual(201, response.status, content) data = jsonutils.loads(content) copy_image_id = data['image']['id'] self.assertEqual('queued', data['image']['status'], content) path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", self.api_port, copy_image_id) def _await_status(expected_status): for i in range(100): time.sleep(0.01) http = httplib2.Http() response, content = http.request(path, 'HEAD') self.assertEqual(200, response.status) if response['x-image-meta-status'] == expected_status: return self.fail('unexpected image status %s' % response['x-image-meta-status']) _await_status('active') # GET image and make sure image content is as expected http = httplib2.Http() response, content = http.request(path, 'GET') self.assertEqual(200, response.status) self.assertEqual(str(FIVE_KB), response['content-length']) self.assertEqual("*" * FIVE_KB, content) self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(), hashlib.md5(content).hexdigest()) # DELETE copied image http = httplib2.Http() response, content = http.request(path, 'DELETE') self.assertEqual(200, response.status) self.stop_servers() @skip_if_disabled def test_copy_from_http_nonexistent_location_url(self): # Ensure HTTP 404 response returned when try to create # image with non-existent http location URL. 
self.cleanup() self.start_servers(**self.__dict__.copy()) setup_http(self) uri = get_http_uri(self, 'foobar') copy_from = uri.replace('images', 'snafu') # POST /images with public image copied from HTTP (to file) headers = {'X-Image-Meta-Name': 'copied', 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True', 'X-Glance-API-Copy-From': copy_from} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers) self.assertEqual(404, response.status, content) expected = 'HTTP datastore could not find image at URI.' self.assertIn(expected, content) self.stop_servers() @skip_if_disabled def test_copy_from_file(self): """ Ensure we can't copy from file """ self.cleanup() self.start_servers(**self.__dict__.copy()) with tempfile.NamedTemporaryFile() as image_file: image_file.write("XXX") image_file.flush() copy_from = 'file://' + image_file.name # POST /images with public image copied from file (to file) headers = {'X-Image-Meta-Name': 'copied', 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True', 'X-Glance-API-Copy-From': copy_from} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers) self.assertEqual(400, response.status, content) expected = 'External sources are not supported: \'%s\'' % copy_from msg = 'expected "%s" in "%s"' % (expected, content) self.assertIn(expected, content, msg) self.stop_servers() @skip_if_disabled def test_copy_from_swift_config(self): """ Ensure we can't copy from swift+config """ self.cleanup() self.start_servers(**self.__dict__.copy()) # POST /images with public image copied from file (to file) headers = {'X-Image-Meta-Name': 'copied', 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', 'X-Image-Meta-Is-Public': 'True', 
'X-Glance-API-Copy-From': 'swift+config://xxx'} path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port) http = httplib2.Http() response, content = http.request(path, 'POST', headers=headers) self.assertEqual(400, response.status, content) expected = 'External sources are not supported: \'swift+config://xxx\'' msg = 'expected "%s" in "%s"' % (expected, content) self.assertIn(expected, content, msg) self.stop_servers() glance-12.0.0/glance/opts.py0000664000567000056710000001304312701407047016757 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__all__ = [ 'list_api_opts', 'list_registry_opts', 'list_scrubber_opts', 'list_cache_opts', 'list_manage_opts', 'list_artifacts_opts' ] import copy import itertools import glance.api.middleware.context import glance.api.versions import glance.async.taskflow_executor import glance.common.config import glance.common.location_strategy import glance.common.location_strategy.store_type import glance.common.property_utils import glance.common.rpc import glance.common.wsgi import glance.image_cache import glance.image_cache.drivers.sqlite import glance.notifier import glance.registry import glance.registry.client import glance.registry.client.v1.api import glance.scrubber _api_opts = [ (None, list(itertools.chain( glance.api.middleware.context.context_opts, glance.api.versions.versions_opts, glance.common.config.common_opts, glance.common.location_strategy.location_strategy_opts, glance.common.property_utils.property_opts, glance.common.rpc.rpc_opts, glance.common.wsgi.bind_opts, glance.common.wsgi.eventlet_opts, glance.common.wsgi.socket_opts, glance.image_cache.drivers.sqlite.sqlite_opts, glance.image_cache.image_cache_opts, glance.notifier.notifier_opts, glance.registry.registry_addr_opts, glance.registry.client.registry_client_ctx_opts, glance.registry.client.registry_client_opts, glance.registry.client.v1.api.registry_client_ctx_opts, glance.scrubber.scrubber_opts))), ('image_format', glance.common.config.image_format_opts), ('task', glance.common.config.task_opts), ('taskflow_executor', glance.async.taskflow_executor.taskflow_executor_opts), ('store_type_location_strategy', glance.common.location_strategy.store_type.store_type_opts), ('profiler', glance.common.wsgi.profiler_opts), ('paste_deploy', glance.common.config.paste_deploy_opts) ] _registry_opts = [ (None, list(itertools.chain( glance.api.middleware.context.context_opts, glance.common.config.common_opts, glance.common.wsgi.bind_opts, glance.common.wsgi.socket_opts, glance.common.wsgi.eventlet_opts))), 
('profiler', glance.common.wsgi.profiler_opts), ('paste_deploy', glance.common.config.paste_deploy_opts) ] _scrubber_opts = [ (None, list(itertools.chain( glance.common.config.common_opts, glance.scrubber.scrubber_opts, glance.scrubber.scrubber_cmd_opts, glance.scrubber.scrubber_cmd_cli_opts, glance.registry.client.registry_client_opts, glance.registry.client.registry_client_ctx_opts, glance.registry.registry_addr_opts))), ] _cache_opts = [ (None, list(itertools.chain( glance.common.config.common_opts, glance.image_cache.drivers.sqlite.sqlite_opts, glance.image_cache.image_cache_opts, glance.registry.registry_addr_opts, glance.registry.client.registry_client_ctx_opts))), ] _manage_opts = [ (None, []) ] _artifacts_opts = [ (None, list(itertools.chain( glance.api.middleware.context.context_opts, glance.api.versions.versions_opts, glance.common.wsgi.bind_opts, glance.common.wsgi.eventlet_opts, glance.common.wsgi.socket_opts, glance.common.wsgi.profiler_opts, glance.notifier.notifier_opts))), ('paste_deploy', glance.common.config.paste_deploy_opts) ] def list_api_opts(): """Return a list of oslo_config options available in Glance API service. Each element of the list is a tuple. The first element is the name of the group under which the list of elements in the second element will be registered. A group name of None corresponds to the [DEFAULT] group in config files. This function is also discoverable via the 'glance.api' entry point under the 'oslo_config.opts' namespace. The purpose of this is to allow tools like the Oslo sample config file generator to discover the options exposed to users by Glance. :returns: a list of (group_name, opts) tuples """ return [(g, copy.deepcopy(o)) for g, o in _api_opts] def list_registry_opts(): """Return a list of oslo_config options available in Glance Registry service. """ return [(g, copy.deepcopy(o)) for g, o in _registry_opts] def list_scrubber_opts(): """Return a list of oslo_config options available in Glance Scrubber service. 
""" return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts] def list_cache_opts(): """Return a list of oslo_config options available in Glance Cache service. """ return [(g, copy.deepcopy(o)) for g, o in _cache_opts] def list_manage_opts(): """Return a list of oslo_config options available in Glance manage.""" return [(g, copy.deepcopy(o)) for g, o in _manage_opts] def list_artifacts_opts(): """Return a list of oslo_config options available in Glance artifacts""" return [(g, copy.deepcopy(o)) for g, o in _artifacts_opts] glance-12.0.0/glance/schema.py0000664000567000056710000001760212701407047017237 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import jsonschema from oslo_utils import encodeutils import six from glance.common import exception from glance.i18n import _ class Schema(object): def __init__(self, name, properties=None, links=None, required=None, definitions=None): self.name = name if properties is None: properties = {} self.properties = properties self.links = links self.required = required self.definitions = definitions def validate(self, obj): try: jsonschema.validate(obj, self.raw()) except jsonschema.ValidationError as e: reason = encodeutils.exception_to_unicode(e) raise exception.InvalidObject(schema=self.name, reason=reason) def filter(self, obj): filtered = {} for key, value in six.iteritems(obj): if self._filter_func(self.properties, key): filtered[key] = value # NOTE(flaper87): This exists to allow for v1, null properties, # to be used with the V2 API. During Kilo, it was allowed for the # later to return None values without considering that V1 allowed # for custom properties to be None, which is something V2 doesn't # allow for. This small hack here will set V1 custom `None` pro- # perties to an empty string so that they will be updated along # with the image (if an update happens). # # We could skip the properties that are `None` but that would bring # back the behavior we moved away from. Note that we can't consider # doing a schema migration because we don't know which properties # are "custom" and which came from `schema-image` if those custom # properties were created with v1. 
if key not in self.properties and value is None: filtered[key] = '' return filtered @staticmethod def _filter_func(properties, key): return key in properties def merge_properties(self, properties): # Ensure custom props aren't attempting to override base props original_keys = set(self.properties.keys()) new_keys = set(properties.keys()) intersecting_keys = original_keys.intersection(new_keys) conflicting_keys = [k for k in intersecting_keys if self.properties[k] != properties[k]] if conflicting_keys: props = ', '.join(conflicting_keys) reason = _("custom properties (%(props)s) conflict " "with base properties") raise exception.SchemaLoadError(reason=reason % {'props': props}) self.properties.update(properties) def raw(self): raw = { 'name': self.name, 'properties': self.properties, 'additionalProperties': False, } if self.definitions: raw['definitions'] = self.definitions if self.required: raw['required'] = self.required if self.links: raw['links'] = self.links return raw def minimal(self): minimal = { 'name': self.name, 'properties': self.properties } if self.definitions: minimal['definitions'] = self.definitions if self.required: minimal['required'] = self.required return minimal class PermissiveSchema(Schema): @staticmethod def _filter_func(properties, key): return True def raw(self): raw = super(PermissiveSchema, self).raw() raw['additionalProperties'] = {'type': 'string'} return raw def minimal(self): minimal = super(PermissiveSchema, self).raw() return minimal class CollectionSchema(object): def __init__(self, name, item_schema): self.name = name self.item_schema = item_schema def raw(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None raw = { 'name': self.name, 'properties': { self.name: { 'type': 'array', 'items': self.item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': 
'{first}'}, {'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: raw['definitions'] = definitions self.item_schema.definitions = definitions return raw def minimal(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None minimal = { 'name': self.name, 'properties': { self.name: { 'type': 'array', 'items': self.item_schema.minimal(), }, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: minimal['definitions'] = definitions self.item_schema.definitions = definitions return minimal class DictCollectionSchema(Schema): def __init__(self, name, item_schema): self.name = name self.item_schema = item_schema def raw(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None raw = { 'name': self.name, 'properties': { self.name: { 'type': 'object', 'additionalProperties': self.item_schema.raw(), }, 'first': {'type': 'string'}, 'next': {'type': 'string'}, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'first', 'href': '{first}'}, {'rel': 'next', 'href': '{next}'}, {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: raw['definitions'] = definitions self.item_schema.definitions = definitions return raw def minimal(self): definitions = None if self.item_schema.definitions: definitions = self.item_schema.definitions self.item_schema.definitions = None minimal = { 'name': self.name, 'properties': { self.name: { 'type': 'object', 'additionalProperties': self.item_schema.minimal(), }, 'schema': {'type': 'string'}, }, 'links': [ {'rel': 'describedby', 'href': '{schema}'}, ], } if definitions: minimal['definitions'] = definitions self.item_schema.definitions = definitions return minimal glance-12.0.0/glance/__init__.py0000664000567000056710000000000012701407047017516 0ustar 
jenkinsjenkins00000000000000glance-12.0.0/glance/registry/0000775000567000056710000000000012701407204017262 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/__init__.py0000664000567000056710000000201112701407047021372 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Registry API """ from oslo_config import cfg from glance.i18n import _ registry_addr_opts = [ cfg.StrOpt('registry_host', default='0.0.0.0', help=_('Address to find the registry server.')), cfg.PortOpt('registry_port', default=9191, help=_('Port the registry server is listening on.')), ] CONF = cfg.CONF CONF.register_opts(registry_addr_opts) glance-12.0.0/glance/registry/api/0000775000567000056710000000000012701407204020033 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/api/__init__.py0000664000567000056710000000235012701407047022151 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from glance.common import wsgi from glance.registry.api import v1 from glance.registry.api import v2 CONF = cfg.CONF CONF.import_opt('enable_v1_registry', 'glance.common.config') CONF.import_opt('enable_v2_registry', 'glance.common.config') class API(wsgi.Router): """WSGI entry point for all Registry requests.""" def __init__(self, mapper): mapper = mapper or wsgi.APIMapper() if CONF.enable_v1_registry: v1.init(mapper) if CONF.enable_v2_registry: v2.init(mapper) super(API, self).__init__(mapper) glance-12.0.0/glance/registry/api/v2/0000775000567000056710000000000012701407204020362 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/api/v2/__init__.py0000664000567000056710000000214512701407047022502 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import wsgi from glance.registry.api.v2 import rpc def init(mapper): rpc_resource = rpc.create_resource() mapper.connect("/rpc", controller=rpc_resource, conditions=dict(method=["POST"]), action="__call__") class API(wsgi.Router): """WSGI entry point for all Registry requests.""" def __init__(self, mapper): mapper = mapper or wsgi.APIMapper() init(mapper) super(API, self).__init__(mapper) glance-12.0.0/glance/registry/api/v2/rpc.py0000664000567000056710000000312512701407047021526 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ RPC Controller """ from oslo_config import cfg from glance.common import rpc from glance.common import wsgi import glance.db from glance.i18n import _ CONF = cfg.CONF class Controller(rpc.Controller): def __init__(self, raise_exc=False): super(Controller, self).__init__(raise_exc) # NOTE(flaper87): Avoid using registry's db # driver for the registry service. It would # end up in an infinite loop. if CONF.data_api == "glance.db.registry.api": msg = _("Registry service can't use %s") % CONF.data_api raise RuntimeError(msg) # NOTE(flaper87): Register the # db_api as a resource to expose. 
db_api = glance.db.get_api() self.register(glance.db.unwrap(db_api)) def create_resource(): """Images resource factory method.""" deserializer = rpc.RPCJSONDeserializer() serializer = rpc.RPCJSONSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/registry/api/v1/0000775000567000056710000000000012701407204020361 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/api/v1/members.py0000664000567000056710000003454012701407051022373 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import encodeutils import webob.exc from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db from glance.i18n import _, _LI, _LW LOG = logging.getLogger(__name__) class Controller(object): def _check_can_access_image_members(self, context): if context.owner is None and not context.is_admin: raise webob.exc.HTTPUnauthorized(_("No authenticated user")) def __init__(self): self.db_api = glance.db.get_api() def is_image_sharable(self, context, image): """Return True if the image can be shared to others in this context.""" # Is admin == image sharable if context.is_admin: return True # Only allow sharing if we have an owner if context.owner is None: return False # If we own the image, we can share it if context.owner == image['owner']: return True members = self.db_api.image_member_find(context, image_id=image['id'], member=context.owner) if members: return members[0]['can_share'] return False def index(self, req, image_id): """ Get the members of an image. """ try: self.db_api.image_get(req.context, image_id) except exception.NotFound: msg = _("Image %(id)s not found") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound(msg) except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists msg = _LW("Access denied to image %(id)s but returning" " 'not found'") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() members = self.db_api.image_member_find(req.context, image_id=image_id) LOG.debug("Returning member list for image %(id)s", {'id': image_id}) return dict(members=make_member_list(members, member_id='member', can_share='can_share')) @utils.mutating def update_all(self, req, image_id, body): """ Replaces the members of the image with those specified in the body. The body is a dict with the following format:: {"memberships": [ {"member_id": , ["can_share": [True|False]]}, ... 
]} """ self._check_can_access_image_members(req.context) # Make sure the image exists try: image = self.db_api.image_get(req.context, image_id) except exception.NotFound: msg = _("Image %(id)s not found") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound(msg) except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists msg = _LW("Access denied to image %(id)s but returning" " 'not found'") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() # Can they manipulate the membership? if not self.is_image_sharable(req.context, image): msg = (_LW("User lacks permission to share image %(id)s") % {'id': image_id}) LOG.warn(msg) msg = _("No permission to share that image") raise webob.exc.HTTPForbidden(msg) # Get the membership list try: memb_list = body['memberships'] except Exception as e: # Malformed entity... msg = _LW("Invalid membership association specified for " "image %(id)s") % {'id': image_id} LOG.warn(msg) msg = (_("Invalid membership association: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) add = [] existing = {} # Walk through the incoming memberships for memb in memb_list: try: datum = dict(image_id=image['id'], member=memb['member_id'], can_share=None) except Exception as e: # Malformed entity... 
msg = _LW("Invalid membership association specified for " "image %(id)s") % {'id': image_id} LOG.warn(msg) msg = (_("Invalid membership association: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) # Figure out what can_share should be if 'can_share' in memb: datum['can_share'] = bool(memb['can_share']) # Try to find the corresponding membership members = self.db_api.image_member_find(req.context, image_id=datum['image_id'], member=datum['member'], include_deleted=True) try: member = members[0] except IndexError: # Default can_share datum['can_share'] = bool(datum['can_share']) add.append(datum) else: # Are we overriding can_share? if datum['can_share'] is None: datum['can_share'] = members[0]['can_share'] existing[member['id']] = { 'values': datum, 'membership': member, } # We now have a filtered list of memberships to add and # memberships to modify. Let's start by walking through all # the existing image memberships... existing_members = self.db_api.image_member_find(req.context, image_id=image['id'], include_deleted=True) for member in existing_members: if member['id'] in existing: # Just update the membership in place update = existing[member['id']]['values'] self.db_api.image_member_update(req.context, member['id'], update) else: if not member['deleted']: # Outdated one; needs to be deleted self.db_api.image_member_delete(req.context, member['id']) # Now add the non-existent ones for memb in add: self.db_api.image_member_create(req.context, memb) # Make an appropriate result LOG.info(_LI("Successfully updated memberships for image %(id)s"), {'id': image_id}) return webob.exc.HTTPNoContent() @utils.mutating def update(self, req, image_id, id, body=None): """ Adds a membership to the image, or updates an existing one. If a body is present, it is a dict with the following format:: {"member": { "can_share": [True|False] }} If "can_share" is provided, the member's ability to share is set accordingly. 
If it is not provided, existing memberships remain unchanged and new memberships default to False. """ self._check_can_access_image_members(req.context) # Make sure the image exists try: image = self.db_api.image_get(req.context, image_id) except exception.NotFound: msg = _("Image %(id)s not found") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound(msg) except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists msg = _LW("Access denied to image %(id)s but returning" " 'not found'") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() # Can they manipulate the membership? if not self.is_image_sharable(req.context, image): msg = (_LW("User lacks permission to share image %(id)s") % {'id': image_id}) LOG.warn(msg) msg = _("No permission to share that image") raise webob.exc.HTTPForbidden(msg) # Determine the applicable can_share value can_share = None if body: try: can_share = bool(body['member']['can_share']) except Exception as e: # Malformed entity... msg = _LW("Invalid membership association specified for " "image %(id)s") % {'id': image_id} LOG.warn(msg) msg = (_("Invalid membership association: %s") % encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=msg) # Look up an existing membership... members = self.db_api.image_member_find(req.context, image_id=image_id, member=id, include_deleted=True) if members: if can_share is not None: values = dict(can_share=can_share) self.db_api.image_member_update(req.context, members[0]['id'], values) else: values = dict(image_id=image['id'], member=id, can_share=bool(can_share)) self.db_api.image_member_create(req.context, values) LOG.info(_LI("Successfully updated a membership for image %(id)s"), {'id': image_id}) return webob.exc.HTTPNoContent() @utils.mutating def delete(self, req, image_id, id): """ Removes a membership from the image. 
""" self._check_can_access_image_members(req.context) # Make sure the image exists try: image = self.db_api.image_get(req.context, image_id) except exception.NotFound: msg = _("Image %(id)s not found") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound(msg) except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists msg = _LW("Access denied to image %(id)s but returning" " 'not found'") % {'id': image_id} LOG.warn(msg) raise webob.exc.HTTPNotFound() # Can they manipulate the membership? if not self.is_image_sharable(req.context, image): msg = (_LW("User lacks permission to share image %(id)s") % {'id': image_id}) LOG.warn(msg) msg = _("No permission to share that image") raise webob.exc.HTTPForbidden(msg) # Look up an existing membership members = self.db_api.image_member_find(req.context, image_id=image_id, member=id) if members: self.db_api.image_member_delete(req.context, members[0]['id']) else: LOG.debug("%(id)s is not a member of image %(image_id)s", {'id': id, 'image_id': image_id}) msg = _("Membership could not be found.") raise webob.exc.HTTPNotFound(explanation=msg) # Make an appropriate result LOG.info(_LI("Successfully deleted a membership from image %(id)s"), {'id': image_id}) return webob.exc.HTTPNoContent() def default(self, req, *args, **kwargs): """This will cover the missing 'show' and 'create' actions""" LOG.debug("The method %s is not allowed for this resource", req.environ['REQUEST_METHOD']) raise webob.exc.HTTPMethodNotAllowed( headers=[('Allow', 'PUT, DELETE')]) def index_shared_images(self, req, id): """ Retrieves images shared with the given member. 
""" try: members = self.db_api.image_member_find(req.context, member=id) except exception.NotFound: msg = _LW("Member %(id)s not found") % {'id': id} LOG.warn(msg) msg = _("Membership could not be found.") raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Returning list of images shared with member %(id)s", {'id': id}) return dict(shared_images=make_member_list(members, image_id='image_id', can_share='can_share')) def make_member_list(members, **attr_map): """ Create a dict representation of a list of members which we can use to serialize the members list. Keyword arguments map the names of optional attributes to include to the database attribute. """ def _fetch_memb(memb, attr_map): return {k: memb[v] for k, v in attr_map.items() if v in memb.keys()} # Return the list of members with the given attribute mapping return [_fetch_memb(memb, attr_map) for memb in members] def create_resource(): """Image members resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() serializer = wsgi.JSONResponseSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/registry/api/v1/__init__.py0000664000567000056710000000662612701407047022511 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import wsgi from glance.registry.api.v1 import images from glance.registry.api.v1 import members def init(mapper): images_resource = images.create_resource() mapper.connect("/", controller=images_resource, action="index") mapper.connect("/images", controller=images_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/images", controller=images_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/images/detail", controller=images_resource, action="detail", conditions={'method': ['GET']}) mapper.connect("/images/{id}", controller=images_resource, action="show", conditions=dict(method=["GET"])) mapper.connect("/images/{id}", controller=images_resource, action="update", conditions=dict(method=["PUT"])) mapper.connect("/images/{id}", controller=images_resource, action="delete", conditions=dict(method=["DELETE"])) members_resource = members.create_resource() mapper.connect("/images/{image_id}/members", controller=members_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/images/{image_id}/members", controller=members_resource, action="create", conditions={'method': ['POST']}) mapper.connect("/images/{image_id}/members", controller=members_resource, action="update_all", conditions=dict(method=["PUT"])) mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="delete", conditions={'method': ['DELETE']}) mapper.connect("/shared-images/{id}", controller=members_resource, action="index_shared_images") class API(wsgi.Router): """WSGI entry point for all Registry requests.""" def __init__(self, mapper): mapper = mapper or wsgi.APIMapper() init(mapper) super(API, self).__init__(mapper) 
glance-12.0.0/glance/registry/api/v1/images.py0000664000567000056710000005033412701407051022205 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Reference implementation registry server WSGI controller """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import strutils from oslo_utils import uuidutils from webob import exc from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.common import wsgi import glance.db from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', 'disk_format', 'container_format', 'checksum'] SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', 'min_ram', 'min_disk', 'size_min', 'size_max', 'changes-since', 'protected'] SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at') SUPPORTED_SORT_DIRS = ('asc', 'desc') SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') def _normalize_image_location_for_db(image_data): """ This function takes the legacy locations field and the newly added location_data field from the image_data values dictionary which flows over the wire between the registry and API servers and converts it into the location_data format only which 
is then consumable by the Image object. :param image_data: a dict of values representing information in the image :returns: a new image data dict """ if 'locations' not in image_data and 'location_data' not in image_data: image_data['locations'] = None return image_data locations = image_data.pop('locations', []) location_data = image_data.pop('location_data', []) location_data_dict = {} for l in locations: location_data_dict[l] = {} for l in location_data: location_data_dict[l['url']] = {'metadata': l['metadata'], 'status': l['status'], # Note(zhiyan): New location has no ID. 'id': l['id'] if 'id' in l else None} # NOTE(jbresnah) preserve original order. tests assume original order, # should that be defined functionality ordered_keys = locations[:] for ld in location_data: if ld['url'] not in ordered_keys: ordered_keys.append(ld['url']) location_data = [] for loc in ordered_keys: data = location_data_dict[loc] if data: location_data.append({'url': loc, 'metadata': data['metadata'], 'status': data['status'], 'id': data['id']}) else: location_data.append({'url': loc, 'metadata': {}, 'status': 'active', 'id': None}) image_data['locations'] = location_data return image_data class Controller(object): def __init__(self): self.db_api = glance.db.get_api() def _get_images(self, context, filters, **params): """Get images, wrapping in exception if necessary.""" # NOTE(markwash): for backwards compatibility, is_public=True for # admins actually means "treat me as if I'm not an admin and show me # all my images" if context.is_admin and params.get('is_public') is True: params['admin_as_user'] = True del params['is_public'] try: return self.db_api.image_get_all(context, filters=filters, **params) except exception.ImageNotFound: LOG.warn(_LW("Invalid marker. Image %(id)s could not be " "found.") % {'id': params.get('marker')}) msg = _("Invalid marker. 
Image could not be found.") raise exc.HTTPBadRequest(explanation=msg) except exception.Forbidden: LOG.warn(_LW("Access denied to image %(id)s but returning " "'not found'") % {'id': params.get('marker')}) msg = _("Invalid marker. Image could not be found.") raise exc.HTTPBadRequest(explanation=msg) except Exception: LOG.exception(_LE("Unable to get images")) raise def index(self, req): """Return a basic filtered list of public, non-deleted images :param req: the Request object coming from the wsgi layer :retval a mapping of the following form:: dict(images=[image_list]) Where image_list is a sequence of mappings:: { 'id': , 'name': , 'size': , 'disk_format': , 'container_format': , 'checksum': } """ params = self._get_query_params(req) images = self._get_images(req.context, **params) results = [] for image in images: result = {} for field in DISPLAY_FIELDS_IN_INDEX: result[field] = image[field] results.append(result) LOG.debug("Returning image list") return dict(images=results) def detail(self, req): """Return a filtered list of public, non-deleted images in detail :param req: the Request object coming from the wsgi layer :retval a mapping of the following form:: dict(images=[image_list]) Where image_list is a sequence of mappings containing all image model fields. """ params = self._get_query_params(req) images = self._get_images(req.context, **params) image_dicts = [make_image_dict(i) for i in images] LOG.debug("Returning detailed image list") return dict(images=image_dicts) def _get_query_params(self, req): """Extract necessary query parameters from http request. 
:param req: the Request object coming from the wsgi layer :retval dictionary of filters to apply to list of images """ params = { 'filters': self._get_filters(req), 'limit': self._get_limit(req), 'sort_key': [self._get_sort_key(req)], 'sort_dir': [self._get_sort_dir(req)], 'marker': self._get_marker(req), } if req.context.is_admin: # Only admin gets to look for non-public images params['is_public'] = self._get_is_public(req) # need to coy items because the params is modified in the loop body items = list(params.items()) for key, value in items: if value is None: del params[key] # Fix for LP Bug #1132294 # Ensure all shared images are returned in v1 params['member_status'] = 'all' return params def _get_filters(self, req): """Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ filters = {} properties = {} for param in req.params: if param in SUPPORTED_FILTERS: filters[param] = req.params.get(param) if param.startswith('property-'): _param = param[9:] properties[_param] = req.params.get(param) if 'changes-since' in filters: isotime = filters['changes-since'] try: filters['changes-since'] = timeutils.parse_isotime(isotime) except ValueError: raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) if 'protected' in filters: value = self._get_bool(filters['protected']) if value is None: raise exc.HTTPBadRequest(_("protected must be True, or " "False")) filters['protected'] = value # only allow admins to filter on 'deleted' if req.context.is_admin: deleted_filter = self._parse_deleted_filter(req) if deleted_filter is not None: filters['deleted'] = deleted_filter elif 'changes-since' not in filters: filters['deleted'] = False elif 'changes-since' not in filters: filters['deleted'] = False if properties: filters['properties'] = properties return filters def _get_limit(self, req): """Parse a limit query param into something usable.""" try: limit = 
int(req.params.get('limit', CONF.limit_param_default)) except ValueError: raise exc.HTTPBadRequest(_("limit param must be an integer")) if limit < 0: raise exc.HTTPBadRequest(_("limit param must be positive")) return min(CONF.api_limit_max, limit) def _get_marker(self, req): """Parse a marker query param into something usable.""" marker = req.params.get('marker', None) if marker and not uuidutils.is_uuid_like(marker): msg = _('Invalid marker format') raise exc.HTTPBadRequest(explanation=msg) return marker def _get_sort_key(self, req): """Parse a sort key query param from the request object.""" sort_key = req.params.get('sort_key', 'created_at') if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: _keys = ', '.join(SUPPORTED_SORT_KEYS) msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) raise exc.HTTPBadRequest(explanation=msg) return sort_key def _get_sort_dir(self, req): """Parse a sort direction query param from the request object.""" sort_dir = req.params.get('sort_dir', 'desc') if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: _keys = ', '.join(SUPPORTED_SORT_DIRS) msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) raise exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_bool(self, value): value = value.lower() if value == 'true' or value == '1': return True elif value == 'false' or value == '0': return False return None def _get_is_public(self, req): """Parse is_public into something usable.""" is_public = req.params.get('is_public', None) if is_public is None: # NOTE(vish): This preserves the default value of showing only # public images. 
return True elif is_public.lower() == 'none': return None value = self._get_bool(is_public) if value is None: raise exc.HTTPBadRequest(_("is_public must be None, True, or " "False")) return value def _parse_deleted_filter(self, req): """Parse deleted into something usable.""" deleted = req.params.get('deleted') if deleted is None: return None return strutils.bool_from_string(deleted) def show(self, req, id): """Return data about the given image id.""" try: image = self.db_api.image_get(req.context, id) LOG.debug("Successfully retrieved image %(id)s", {'id': id}) except exception.ImageNotFound: LOG.info(_LI("Image %(id)s not found"), {'id': id}) raise exc.HTTPNotFound() except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists LOG.info(_LI("Access denied to image %(id)s but returning" " 'not found'"), {'id': id}) raise exc.HTTPNotFound() except Exception: LOG.exception(_LE("Unable to show image %s") % id) raise return dict(image=make_image_dict(image)) @utils.mutating def delete(self, req, id): """Deletes an existing image with the registry. :param req: wsgi Request object :param id: The opaque internal identifier for the image :retval Returns 200 if delete was successful, a fault if not. On success, the body contains the deleted image information as a mapping. 
""" try: deleted_image = self.db_api.image_destroy(req.context, id) LOG.info(_LI("Successfully deleted image %(id)s"), {'id': id}) return dict(image=make_image_dict(deleted_image)) except exception.ForbiddenPublicImage: LOG.info(_LI("Delete denied for public image %(id)s"), {'id': id}) raise exc.HTTPForbidden() except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists LOG.info(_LI("Access denied to image %(id)s but returning" " 'not found'"), {'id': id}) return exc.HTTPNotFound() except exception.ImageNotFound: LOG.info(_LI("Image %(id)s not found"), {'id': id}) return exc.HTTPNotFound() except Exception: LOG.exception(_LE("Unable to delete image %s") % id) raise @utils.mutating def create(self, req, body): """Registers a new image with the registry. :param req: wsgi Request object :param body: Dictionary of information about the image :retval Returns the newly-created image information as a mapping, which will include the newly-created image's internal id in the 'id' field """ image_data = body['image'] # Ensure the image has a status set image_data.setdefault('status', 'active') # Set up the image owner if not req.context.is_admin or 'owner' not in image_data: image_data['owner'] = req.context.owner image_id = image_data.get('id') if image_id and not uuidutils.is_uuid_like(image_id): LOG.info(_LI("Rejecting image creation request for invalid image " "id '%(bad_id)s'"), {'bad_id': image_id}) msg = _("Invalid image id format") return exc.HTTPBadRequest(explanation=msg) if 'location' in image_data: image_data['locations'] = [image_data.pop('location')] try: image_data = _normalize_image_location_for_db(image_data) image_data = self.db_api.image_create(req.context, image_data) image_data = dict(image=make_image_dict(image_data)) LOG.info(_LI("Successfully created image %(id)s"), {'id': image_data['image']['id']}) return image_data except exception.Duplicate: msg = _("Image with identifier %s already exists!") % image_id 
LOG.warn(msg) return exc.HTTPConflict(msg) except exception.Invalid as e: msg = (_("Failed to add image metadata. " "Got error: %s") % encodeutils.exception_to_unicode(e)) LOG.error(msg) return exc.HTTPBadRequest(msg) except Exception: LOG.exception(_LE("Unable to create image %s"), image_id) raise @utils.mutating def update(self, req, id, body): """Updates an existing image with the registry. :param req: wsgi Request object :param body: Dictionary of information about the image :param id: The opaque internal identifier for the image :retval Returns the updated image information as a mapping, """ image_data = body['image'] from_state = body.get('from_state', None) # Prohibit modification of 'owner' if not req.context.is_admin and 'owner' in image_data: del image_data['owner'] if 'location' in image_data: image_data['locations'] = [image_data.pop('location')] purge_props = req.headers.get("X-Glance-Registry-Purge-Props", "false") try: # These fields hold sensitive data, which should not be printed in # the logs. sensitive_fields = ['locations', 'location_data'] LOG.debug("Updating image %(id)s with metadata: %(image_data)r", {'id': id, 'image_data': {k: v for k, v in image_data.items() if k not in sensitive_fields}}) image_data = _normalize_image_location_for_db(image_data) if purge_props == "true": purge_props = True else: purge_props = False updated_image = self.db_api.image_update(req.context, id, image_data, purge_props=purge_props, from_state=from_state) LOG.info(_LI("Updating metadata for image %(id)s"), {'id': id}) return dict(image=make_image_dict(updated_image)) except exception.Invalid as e: msg = (_("Failed to update image metadata. 
" "Got error: %s") % encodeutils.exception_to_unicode(e)) LOG.error(msg) return exc.HTTPBadRequest(msg) except exception.ImageNotFound: LOG.info(_LI("Image %(id)s not found"), {'id': id}) raise exc.HTTPNotFound(body='Image not found', request=req, content_type='text/plain') except exception.ForbiddenPublicImage: LOG.info(_LI("Update denied for public image %(id)s"), {'id': id}) raise exc.HTTPForbidden() except exception.Forbidden: # If it's private and doesn't belong to them, don't let on # that it exists LOG.info(_LI("Access denied to image %(id)s but returning" " 'not found'"), {'id': id}) raise exc.HTTPNotFound(body='Image not found', request=req, content_type='text/plain') except exception.Conflict as e: LOG.info(encodeutils.exception_to_unicode(e)) raise exc.HTTPConflict(body='Image operation conflicts', request=req, content_type='text/plain') except Exception: LOG.exception(_LE("Unable to update image %s") % id) raise def _limit_locations(image): locations = image.pop('locations', []) image['location_data'] = locations image['location'] = None for loc in locations: if loc['status'] == 'active': image['location'] = loc['url'] break def make_image_dict(image): """Create a dict representation of an image which we can use to serialize the image. """ def _fetch_attrs(d, attrs): return {a: d[a] for a in attrs if a in d.keys()} # TODO(sirp): should this be a dict, or a list of dicts? 
# A plain dict is more convenient, but list of dicts would provide # access to created_at, etc properties = {p['name']: p['value'] for p in image['properties'] if not p['deleted']} image_dict = _fetch_attrs(image, glance.db.IMAGE_ATTRS) image_dict['properties'] = properties _limit_locations(image_dict) return image_dict def create_resource(): """Images resource factory method.""" deserializer = wsgi.JSONRequestDeserializer() serializer = wsgi.JSONResponseSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/registry/client/0000775000567000056710000000000012701407204020540 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/client/__init__.py0000664000567000056710000001343112701407047022660 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from glance.i18n import _ registry_client_opts = [ cfg.StrOpt('registry_client_protocol', default='http', help=_('The protocol to use for communication with the ' 'registry server. Either http or https.')), cfg.StrOpt('registry_client_key_file', help=_('The path to the key file to use in SSL connections ' 'to the registry server, if any. 
Alternately, you may ' 'set the GLANCE_CLIENT_KEY_FILE environment variable to ' 'a filepath of the key file')), cfg.StrOpt('registry_client_cert_file', help=_('The path to the cert file to use in SSL connections ' 'to the registry server, if any. Alternately, you may ' 'set the GLANCE_CLIENT_CERT_FILE environment variable ' 'to a filepath of the CA cert file')), cfg.StrOpt('registry_client_ca_file', help=_('The path to the certifying authority cert file to use ' 'in SSL connections to the registry server, if any. ' 'Alternately, you may set the GLANCE_CLIENT_CA_FILE ' 'environment variable to a filepath of the CA cert ' 'file.')), cfg.BoolOpt('registry_client_insecure', default=False, help=_('When using SSL in connections to the registry server, ' 'do not require validation via a certifying ' 'authority. This is the registry\'s equivalent of ' 'specifying --insecure on the command line using ' 'glanceclient for the API.')), cfg.IntOpt('registry_client_timeout', default=600, help=_('The period of time, in seconds, that the API server ' 'will wait for a registry request to complete. A ' 'value of 0 implies no timeout.')), ] _DEPRECATE_USE_USER_TOKEN_MSG = ('This option was considered harmful and ' 'has been deprecated in M release. It will ' 'be removed in O release. For more ' 'information read OSSN-0060. ' 'Related functionality with uploading big ' 'images has been implemented with Keystone ' 'trusts support.') registry_client_ctx_opts = [ cfg.BoolOpt('use_user_token', default=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('Whether to pass through the user token when ' 'making requests to the registry. To prevent ' 'failures with token expiration during big ' 'files upload, it is recommended to set this ' 'parameter to False.' 
'If "use_user_token" is not in effect, then ' 'admin credentials can be specified.')), cfg.StrOpt('admin_user', secret=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The administrators user name. ' 'If "use_user_token" is not in effect, then ' 'admin credentials can be specified.')), cfg.StrOpt('admin_password', secret=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The administrators password. ' 'If "use_user_token" is not in effect, then ' 'admin credentials can be specified.')), cfg.StrOpt('admin_tenant_name', secret=True, deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The tenant name of the administrative user. ' 'If "use_user_token" is not in effect, then ' 'admin tenant name can be specified.')), cfg.StrOpt('auth_url', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The URL to the keystone service. ' 'If "use_user_token" is not in effect and ' 'using keystone auth, then URL of keystone ' 'can be specified.')), cfg.StrOpt('auth_strategy', default='noauth', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The strategy to use for authentication. ' 'If "use_user_token" is not in effect, then ' 'auth strategy can be specified.')), cfg.StrOpt('auth_region', deprecated_for_removal=True, deprecated_reason=_DEPRECATE_USE_USER_TOKEN_MSG, help=_('The region for the authentication service. 
' 'If "use_user_token" is not in effect and ' 'using keystone auth, then region name can ' 'be specified.')), ] CONF = cfg.CONF CONF.register_opts(registry_client_opts) CONF.register_opts(registry_client_ctx_opts) glance-12.0.0/glance/registry/client/v2/0000775000567000056710000000000012701407204021067 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/client/v2/__init__.py0000664000567000056710000000000012701407047023173 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/client/v2/api.py0000664000567000056710000000710112701407047022216 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Registry's Client V2 """ import os from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance.i18n import _ from glance.registry.client.v2 import client LOG = logging.getLogger(__name__) CONF = cfg.CONF _registry_client = 'glance.registry.client' CONF.import_opt('registry_client_protocol', _registry_client) CONF.import_opt('registry_client_key_file', _registry_client) CONF.import_opt('registry_client_cert_file', _registry_client) CONF.import_opt('registry_client_ca_file', _registry_client) CONF.import_opt('registry_client_insecure', _registry_client) CONF.import_opt('registry_client_timeout', _registry_client) CONF.import_opt('use_user_token', _registry_client) CONF.import_opt('admin_user', _registry_client) CONF.import_opt('admin_password', _registry_client) CONF.import_opt('admin_tenant_name', _registry_client) CONF.import_opt('auth_url', _registry_client) CONF.import_opt('auth_strategy', _registry_client) CONF.import_opt('auth_region', _registry_client) _CLIENT_CREDS = None _CLIENT_HOST = None _CLIENT_PORT = None _CLIENT_KWARGS = {} def configure_registry_client(): """ Sets up a registry client for use in registry lookups """ global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT try: host, port = CONF.registry_host, CONF.registry_port except cfg.ConfigFileValueError: msg = _("Configuration option was not valid") LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(msg) except IndexError: msg = _("Could not find required configuration option") LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(msg) _CLIENT_HOST = host _CLIENT_PORT = port _CLIENT_KWARGS = { 'use_ssl': CONF.registry_client_protocol.lower() == 'https', 'key_file': CONF.registry_client_key_file, 'cert_file': CONF.registry_client_cert_file, 'ca_file': CONF.registry_client_ca_file, 'insecure': CONF.registry_client_insecure, 'timeout': CONF.registry_client_timeout, } if not CONF.use_user_token: 
configure_registry_admin_creds() def configure_registry_admin_creds(): global _CLIENT_CREDS if CONF.auth_url or os.getenv('OS_AUTH_URL'): strategy = 'keystone' else: strategy = CONF.auth_strategy _CLIENT_CREDS = { 'user': CONF.admin_user, 'password': CONF.admin_password, 'username': CONF.admin_user, 'tenant': CONF.admin_tenant_name, 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, 'strategy': strategy, 'region': CONF.auth_region, } def get_registry_client(cxt): global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT kwargs = _CLIENT_KWARGS.copy() if CONF.use_user_token: kwargs['auth_token'] = cxt.auth_token if _CLIENT_CREDS: kwargs['creds'] = _CLIENT_CREDS return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs) glance-12.0.0/glance/registry/client/v2/client.py0000664000567000056710000000153212701407047022725 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Simple client class to speak with any RESTful service that implements the Glance Registry API """ from glance.common import rpc class RegistryClient(rpc.RPCClient): """Registry's V2 Client.""" DEFAULT_PORT = 9191 glance-12.0.0/glance/registry/client/v1/0000775000567000056710000000000012701407204021066 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/client/v1/__init__.py0000664000567000056710000000000012701407047023172 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/registry/client/v1/api.py0000664000567000056710000001552512701407047022226 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Registry's Client API """ import os from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from glance.common import exception from glance.i18n import _ from glance.registry.client.v1 import client LOG = logging.getLogger(__name__) registry_client_ctx_opts = [ cfg.BoolOpt('send_identity_headers', default=False, help=_("Whether to pass through headers containing user " "and tenant information when making requests to " "the registry. This allows the registry to use the " "context middleware without keystonemiddleware's " "auth_token middleware, removing calls to the keystone " "auth service. 
It is recommended that when using this " "option, secure communication between glance api and " "glance registry is ensured by means other than " "auth_token middleware.")), ] CONF = cfg.CONF CONF.register_opts(registry_client_ctx_opts) _registry_client = 'glance.registry.client' CONF.import_opt('registry_client_protocol', _registry_client) CONF.import_opt('registry_client_key_file', _registry_client) CONF.import_opt('registry_client_cert_file', _registry_client) CONF.import_opt('registry_client_ca_file', _registry_client) CONF.import_opt('registry_client_insecure', _registry_client) CONF.import_opt('registry_client_timeout', _registry_client) CONF.import_opt('use_user_token', _registry_client) CONF.import_opt('admin_user', _registry_client) CONF.import_opt('admin_password', _registry_client) CONF.import_opt('admin_tenant_name', _registry_client) CONF.import_opt('auth_url', _registry_client) CONF.import_opt('auth_strategy', _registry_client) CONF.import_opt('auth_region', _registry_client) CONF.import_opt('metadata_encryption_key', 'glance.common.config') _CLIENT_CREDS = None _CLIENT_HOST = None _CLIENT_PORT = None _CLIENT_KWARGS = {} # AES key used to encrypt 'location' metadata _METADATA_ENCRYPTION_KEY = None def configure_registry_client(): """ Sets up a registry client for use in registry lookups """ global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY try: host, port = CONF.registry_host, CONF.registry_port except cfg.ConfigFileValueError: msg = _("Configuration option was not valid") LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(reason=msg) except IndexError: msg = _("Could not find required configuration option") LOG.error(msg) raise exception.BadRegistryConnectionConfiguration(reason=msg) _CLIENT_HOST = host _CLIENT_PORT = port _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key _CLIENT_KWARGS = { 'use_ssl': CONF.registry_client_protocol.lower() == 'https', 'key_file': CONF.registry_client_key_file, 
'cert_file': CONF.registry_client_cert_file, 'ca_file': CONF.registry_client_ca_file, 'insecure': CONF.registry_client_insecure, 'timeout': CONF.registry_client_timeout, } if not CONF.use_user_token: configure_registry_admin_creds() def configure_registry_admin_creds(): global _CLIENT_CREDS if CONF.auth_url or os.getenv('OS_AUTH_URL'): strategy = 'keystone' else: strategy = CONF.auth_strategy _CLIENT_CREDS = { 'user': CONF.admin_user, 'password': CONF.admin_password, 'username': CONF.admin_user, 'tenant': CONF.admin_tenant_name, 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, 'strategy': strategy, 'region': CONF.auth_region, } def get_registry_client(cxt): global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT global _METADATA_ENCRYPTION_KEY kwargs = _CLIENT_KWARGS.copy() if CONF.use_user_token: kwargs['auth_token'] = cxt.auth_token if _CLIENT_CREDS: kwargs['creds'] = _CLIENT_CREDS if CONF.send_identity_headers: identity_headers = { 'X-User-Id': cxt.user or '', 'X-Tenant-Id': cxt.tenant or '', 'X-Roles': ','.join(cxt.roles), 'X-Identity-Status': 'Confirmed', 'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog), } kwargs['identity_headers'] = identity_headers kwargs['request_id'] = cxt.request_id return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY, **kwargs) def get_images_list(context, **kwargs): c = get_registry_client(context) return c.get_images(**kwargs) def get_images_detail(context, **kwargs): c = get_registry_client(context) return c.get_images_detailed(**kwargs) def get_image_metadata(context, image_id): c = get_registry_client(context) return c.get_image(image_id) def add_image_metadata(context, image_meta): LOG.debug("Adding image metadata...") c = get_registry_client(context) return c.add_image(image_meta) def update_image_metadata(context, image_id, image_meta, purge_props=False, from_state=None): LOG.debug("Updating image metadata for image %s...", image_id) c = get_registry_client(context) return 
c.update_image(image_id, image_meta, purge_props=purge_props, from_state=from_state) def delete_image_metadata(context, image_id): LOG.debug("Deleting image metadata for image %s...", image_id) c = get_registry_client(context) return c.delete_image(image_id) def get_image_members(context, image_id): c = get_registry_client(context) return c.get_image_members(image_id) def get_member_images(context, member_id): c = get_registry_client(context) return c.get_member_images(member_id) def replace_members(context, image_id, member_data): c = get_registry_client(context) return c.replace_members(image_id, member_data) def add_member(context, image_id, member_id, can_share=None): c = get_registry_client(context) return c.add_member(image_id, member_id, can_share=can_share) def delete_member(context, image_id, member_id): c = get_registry_client(context) return c.delete_member(image_id, member_id) glance-12.0.0/glance/registry/client/v1/client.py0000664000567000056710000002622012701407047022725 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Simple client class to speak with any RESTful service that implements the Glance Registry API """ from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils import six from glance.common.client import BaseClient from glance.common import crypt from glance.i18n import _LE from glance.registry.api.v1 import images LOG = logging.getLogger(__name__) class RegistryClient(BaseClient): """A client for the Registry image metadata service.""" DEFAULT_PORT = 9191 def __init__(self, host=None, port=None, metadata_encryption_key=None, identity_headers=None, **kwargs): """ :param metadata_encryption_key: Key used to encrypt 'location' metadata """ self.metadata_encryption_key = metadata_encryption_key # NOTE (dprince): by default base client overwrites host and port # settings when using keystone. configure_via_auth=False disables # this behaviour to ensure we still send requests to the Registry API self.identity_headers = identity_headers # store available passed request id for do_request call self._passed_request_id = kwargs.pop('request_id', None) BaseClient.__init__(self, host, port, configure_via_auth=False, **kwargs) def decrypt_metadata(self, image_metadata): if self.metadata_encryption_key: if image_metadata.get('location'): location = crypt.urlsafe_decrypt(self.metadata_encryption_key, image_metadata['location']) image_metadata['location'] = location if image_metadata.get('location_data'): ld = [] for loc in image_metadata['location_data']: url = crypt.urlsafe_decrypt(self.metadata_encryption_key, loc['url']) ld.append({'id': loc['id'], 'url': url, 'metadata': loc['metadata'], 'status': loc['status']}) image_metadata['location_data'] = ld return image_metadata def encrypt_metadata(self, image_metadata): if self.metadata_encryption_key: location_url = image_metadata.get('location') if location_url: location = crypt.urlsafe_encrypt(self.metadata_encryption_key, location_url, 64) image_metadata['location'] = location 
if image_metadata.get('location_data'): ld = [] for loc in image_metadata['location_data']: if loc['url'] == location_url: url = location else: url = crypt.urlsafe_encrypt( self.metadata_encryption_key, loc['url'], 64) ld.append({'url': url, 'metadata': loc['metadata'], 'status': loc['status'], # NOTE(zhiyan): New location has no ID field. 'id': loc.get('id')}) image_metadata['location_data'] = ld return image_metadata def get_images(self, **kwargs): """ Returns a list of image id/name mappings from Registry :param filters: dict of keys & expected values to filter results :param marker: image id after which to start page :param limit: max number of images to return :param sort_key: results will be ordered by this image attribute :param sort_dir: direction in which to order results (asc, desc) """ params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) res = self.do_request("GET", "/images", params=params) image_list = jsonutils.loads(res.read())['images'] for image in image_list: image = self.decrypt_metadata(image) return image_list def do_request(self, method, action, **kwargs): try: kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers'].update(self.identity_headers or {}) if self._passed_request_id: request_id = self._passed_request_id if six.PY3 and isinstance(request_id, bytes): request_id = request_id.decode('utf-8') kwargs['headers']['X-Openstack-Request-ID'] = request_id res = super(RegistryClient, self).do_request(method, action, **kwargs) status = res.status request_id = res.getheader('x-openstack-request-id') if six.PY3 and isinstance(request_id, bytes): request_id = request_id.decode('utf-8') LOG.debug("Registry request %(method)s %(action)s HTTP %(status)s" " request id %(request_id)s", {'method': method, 'action': action, 'status': status, 'request_id': request_id}) except Exception as exc: with excutils.save_and_reraise_exception(): exc_name = exc.__class__.__name__ LOG.exception(_LE("Registry client request %(method)s " "%(action)s 
raised %(exc_name)s"), {'method': method, 'action': action, 'exc_name': exc_name}) return res def get_images_detailed(self, **kwargs): """ Returns a list of detailed image data mappings from Registry :param filters: dict of keys & expected values to filter results :param marker: image id after which to start page :param limit: max number of images to return :param sort_key: results will be ordered by this image attribute :param sort_dir: direction in which to order results (asc, desc) """ params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) res = self.do_request("GET", "/images/detail", params=params) image_list = jsonutils.loads(res.read())['images'] for image in image_list: image = self.decrypt_metadata(image) return image_list def get_image(self, image_id): """Returns a mapping of image metadata from Registry.""" res = self.do_request("GET", "/images/%s" % image_id) data = jsonutils.loads(res.read())['image'] return self.decrypt_metadata(data) def add_image(self, image_metadata): """ Tells registry about an image's metadata """ headers = { 'Content-Type': 'application/json', } if 'image' not in image_metadata: image_metadata = dict(image=image_metadata) encrypted_metadata = self.encrypt_metadata(image_metadata['image']) image_metadata['image'] = encrypted_metadata body = jsonutils.dump_as_bytes(image_metadata) res = self.do_request("POST", "/images", body=body, headers=headers) # Registry returns a JSONified dict(image=image_info) data = jsonutils.loads(res.read()) image = data['image'] return self.decrypt_metadata(image) def update_image(self, image_id, image_metadata, purge_props=False, from_state=None): """ Updates Registry's information about an image """ if 'image' not in image_metadata: image_metadata = dict(image=image_metadata) encrypted_metadata = self.encrypt_metadata(image_metadata['image']) image_metadata['image'] = encrypted_metadata image_metadata['from_state'] = from_state body = jsonutils.dump_as_bytes(image_metadata) headers = { 
'Content-Type': 'application/json', } if purge_props: headers["X-Glance-Registry-Purge-Props"] = "true" res = self.do_request("PUT", "/images/%s" % image_id, body=body, headers=headers) data = jsonutils.loads(res.read()) image = data['image'] return self.decrypt_metadata(image) def delete_image(self, image_id): """ Deletes Registry's information about an image """ res = self.do_request("DELETE", "/images/%s" % image_id) data = jsonutils.loads(res.read()) image = data['image'] return image def get_image_members(self, image_id): """Return a list of membership associations from Registry.""" res = self.do_request("GET", "/images/%s/members" % image_id) data = jsonutils.loads(res.read())['members'] return data def get_member_images(self, member_id): """Return a list of membership associations from Registry.""" res = self.do_request("GET", "/shared-images/%s" % member_id) data = jsonutils.loads(res.read())['shared_images'] return data def replace_members(self, image_id, member_data): """Replace registry's information about image membership.""" if isinstance(member_data, (list, tuple)): member_data = dict(memberships=list(member_data)) elif (isinstance(member_data, dict) and 'memberships' not in member_data): member_data = dict(memberships=[member_data]) body = jsonutils.dump_as_bytes(member_data) headers = {'Content-Type': 'application/json', } res = self.do_request("PUT", "/images/%s/members" % image_id, body=body, headers=headers) return self.get_status_code(res) == 204 def add_member(self, image_id, member_id, can_share=None): """Add to registry's information about image membership.""" body = None headers = {} # Build up a body if can_share is specified if can_share is not None: body = jsonutils.dump_as_bytes( dict(member=dict(can_share=can_share))) headers['Content-Type'] = 'application/json' url = "/images/%s/members/%s" % (image_id, member_id) res = self.do_request("PUT", url, body=body, headers=headers) return self.get_status_code(res) == 204 def 
delete_member(self, image_id, member_id): """Delete registry's information about image membership.""" res = self.do_request("DELETE", "/images/%s/members/%s" % (image_id, member_id)) return self.get_status_code(res) == 204 glance-12.0.0/glance/gateway.py0000664000567000056710000002713012701407047017435 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from glance.api import authorization from glance.api import policy from glance.api import property_protections from glance.common import property_utils from glance.common import store_utils import glance.db import glance.domain import glance.location import glance.notifier import glance.quota class Gateway(object): def __init__(self, db_api=None, store_api=None, notifier=None, policy_enforcer=None): self.db_api = db_api or glance.db.get_api() self.store_api = store_api or glance_store self.store_utils = store_utils self.notifier = notifier or glance.notifier.Notifier() self.policy = policy_enforcer or policy.Enforcer() def get_image_factory(self, context): image_factory = glance.domain.ImageFactory() store_image_factory = glance.location.ImageFactoryProxy( image_factory, context, self.store_api, self.store_utils) quota_image_factory = glance.quota.ImageFactoryProxy( store_image_factory, context, self.db_api, self.store_utils) policy_image_factory = policy.ImageFactoryProxy( quota_image_factory, 
context, self.policy) notifier_image_factory = glance.notifier.ImageFactoryProxy( policy_image_factory, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) pif = property_protections.ProtectedImageFactoryProxy( notifier_image_factory, context, property_rules) authorized_image_factory = authorization.ImageFactoryProxy( pif, context) else: authorized_image_factory = authorization.ImageFactoryProxy( notifier_image_factory, context) return authorized_image_factory def get_image_member_factory(self, context): image_factory = glance.domain.ImageMemberFactory() quota_image_factory = glance.quota.ImageMemberFactoryProxy( image_factory, context, self.db_api, self.store_utils) policy_member_factory = policy.ImageMemberFactoryProxy( quota_image_factory, context, self.policy) authorized_image_factory = authorization.ImageMemberFactoryProxy( policy_member_factory, context) return authorized_image_factory def get_repo(self, context): image_repo = glance.db.ImageRepo(context, self.db_api) store_image_repo = glance.location.ImageRepoProxy( image_repo, context, self.store_api, self.store_utils) quota_image_repo = glance.quota.ImageRepoProxy( store_image_repo, context, self.db_api, self.store_utils) policy_image_repo = policy.ImageRepoProxy( quota_image_repo, context, self.policy) notifier_image_repo = glance.notifier.ImageRepoProxy( policy_image_repo, context, self.notifier) if property_utils.is_property_protection_enabled(): property_rules = property_utils.PropertyRules(self.policy) pir = property_protections.ProtectedImageRepoProxy( notifier_image_repo, context, property_rules) authorized_image_repo = authorization.ImageRepoProxy( pir, context) else: authorized_image_repo = authorization.ImageRepoProxy( notifier_image_repo, context) return authorized_image_repo def get_member_repo(self, image, context): image_member_repo = glance.db.ImageMemberRepo( context, self.db_api, image) store_image_repo = 
glance.location.ImageMemberRepoProxy( image_member_repo, image, context, self.store_api) policy_member_repo = policy.ImageMemberRepoProxy( store_image_repo, image, context, self.policy) notifier_member_repo = glance.notifier.ImageMemberRepoProxy( policy_member_repo, image, context, self.notifier) authorized_member_repo = authorization.ImageMemberRepoProxy( notifier_member_repo, image, context) return authorized_member_repo def get_task_factory(self, context): task_factory = glance.domain.TaskFactory() policy_task_factory = policy.TaskFactoryProxy( task_factory, context, self.policy) notifier_task_factory = glance.notifier.TaskFactoryProxy( policy_task_factory, context, self.notifier) authorized_task_factory = authorization.TaskFactoryProxy( notifier_task_factory, context) return authorized_task_factory def get_task_repo(self, context): task_repo = glance.db.TaskRepo(context, self.db_api) policy_task_repo = policy.TaskRepoProxy( task_repo, context, self.policy) notifier_task_repo = glance.notifier.TaskRepoProxy( policy_task_repo, context, self.notifier) authorized_task_repo = authorization.TaskRepoProxy( notifier_task_repo, context) return authorized_task_repo def get_task_stub_repo(self, context): task_stub_repo = glance.db.TaskRepo(context, self.db_api) policy_task_stub_repo = policy.TaskStubRepoProxy( task_stub_repo, context, self.policy) notifier_task_stub_repo = glance.notifier.TaskStubRepoProxy( policy_task_stub_repo, context, self.notifier) authorized_task_stub_repo = authorization.TaskStubRepoProxy( notifier_task_stub_repo, context) return authorized_task_stub_repo def get_task_executor_factory(self, context): task_repo = self.get_task_repo(context) image_repo = self.get_repo(context) image_factory = self.get_image_factory(context) return glance.domain.TaskExecutorFactory(task_repo, image_repo, image_factory) def get_metadef_namespace_factory(self, context): ns_factory = glance.domain.MetadefNamespaceFactory() policy_ns_factory = 
policy.MetadefNamespaceFactoryProxy( ns_factory, context, self.policy) notifier_ns_factory = glance.notifier.MetadefNamespaceFactoryProxy( policy_ns_factory, context, self.notifier) authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy( notifier_ns_factory, context) return authorized_ns_factory def get_metadef_namespace_repo(self, context): ns_repo = glance.db.MetadefNamespaceRepo(context, self.db_api) policy_ns_repo = policy.MetadefNamespaceRepoProxy( ns_repo, context, self.policy) notifier_ns_repo = glance.notifier.MetadefNamespaceRepoProxy( policy_ns_repo, context, self.notifier) authorized_ns_repo = authorization.MetadefNamespaceRepoProxy( notifier_ns_repo, context) return authorized_ns_repo def get_metadef_object_factory(self, context): object_factory = glance.domain.MetadefObjectFactory() policy_object_factory = policy.MetadefObjectFactoryProxy( object_factory, context, self.policy) notifier_object_factory = glance.notifier.MetadefObjectFactoryProxy( policy_object_factory, context, self.notifier) authorized_object_factory = authorization.MetadefObjectFactoryProxy( notifier_object_factory, context) return authorized_object_factory def get_metadef_object_repo(self, context): object_repo = glance.db.MetadefObjectRepo(context, self.db_api) policy_object_repo = policy.MetadefObjectRepoProxy( object_repo, context, self.policy) notifier_object_repo = glance.notifier.MetadefObjectRepoProxy( policy_object_repo, context, self.notifier) authorized_object_repo = authorization.MetadefObjectRepoProxy( notifier_object_repo, context) return authorized_object_repo def get_metadef_resource_type_factory(self, context): resource_type_factory = glance.domain.MetadefResourceTypeFactory() policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy( resource_type_factory, context, self.policy) notifier_resource_type_factory = ( glance.notifier.MetadefResourceTypeFactoryProxy( policy_resource_type_factory, context, self.notifier) ) 
authorized_resource_type_factory = ( authorization.MetadefResourceTypeFactoryProxy( notifier_resource_type_factory, context) ) return authorized_resource_type_factory def get_metadef_resource_type_repo(self, context): resource_type_repo = glance.db.MetadefResourceTypeRepo( context, self.db_api) policy_object_repo = policy.MetadefResourceTypeRepoProxy( resource_type_repo, context, self.policy) notifier_object_repo = glance.notifier.MetadefResourceTypeRepoProxy( policy_object_repo, context, self.notifier) authorized_object_repo = authorization.MetadefResourceTypeRepoProxy( notifier_object_repo, context) return authorized_object_repo def get_metadef_property_factory(self, context): prop_factory = glance.domain.MetadefPropertyFactory() policy_prop_factory = policy.MetadefPropertyFactoryProxy( prop_factory, context, self.policy) notifier_prop_factory = glance.notifier.MetadefPropertyFactoryProxy( policy_prop_factory, context, self.notifier) authorized_prop_factory = authorization.MetadefPropertyFactoryProxy( notifier_prop_factory, context) return authorized_prop_factory def get_metadef_property_repo(self, context): prop_repo = glance.db.MetadefPropertyRepo(context, self.db_api) policy_prop_repo = policy.MetadefPropertyRepoProxy( prop_repo, context, self.policy) notifier_prop_repo = glance.notifier.MetadefPropertyRepoProxy( policy_prop_repo, context, self.notifier) authorized_prop_repo = authorization.MetadefPropertyRepoProxy( notifier_prop_repo, context) return authorized_prop_repo def get_metadef_tag_factory(self, context): tag_factory = glance.domain.MetadefTagFactory() policy_tag_factory = policy.MetadefTagFactoryProxy( tag_factory, context, self.policy) notifier_tag_factory = glance.notifier.MetadefTagFactoryProxy( policy_tag_factory, context, self.notifier) authorized_tag_factory = authorization.MetadefTagFactoryProxy( notifier_tag_factory, context) return authorized_tag_factory def get_metadef_tag_repo(self, context): tag_repo = glance.db.MetadefTagRepo(context, 
class Prefetcher(base.CacheApp):
    """Fetches queued images from the backend store into the image cache."""

    def __init__(self):
        super(Prefetcher, self).__init__()
        # The registry client supplies image metadata (status, location,
        # checksum) before any image data is pulled from the backend store.
        registry.configure_registry_client()
        registry.configure_registry_admin_creds()

    def fetch_image_into_cache(self, image_id):
        """Fetch a single image's data into the cache.

        :param image_id: Opaque image identifier
        :returns: True if the image was tee'd into the cache,
                  False if it was skipped (not active / no metadata)
        """
        ctx = context.RequestContext(is_admin=True, show_deleted=True)
        try:
            image_meta = registry.get_image_metadata(ctx, image_id)
            if image_meta['status'] != 'active':
                # Lazy logger args instead of eager '%' interpolation so the
                # string is only formatted when WARN is actually emitted.
                LOG.warn(_LW("Image '%s' is not active. Not caching."),
                         image_id)
                return False
        except exception.NotFound:
            LOG.warn(_LW("No metadata found for image '%s'"), image_id)
            return False

        location = image_meta['location']
        image_data, image_size = glance_store.get_from_backend(location,
                                                               context=ctx)
        LOG.debug("Caching image '%s'", image_id)
        cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data,
                                                   image_meta['checksum'])
        # Image is tee'd into cache and checksum verified
        # as we iterate.  Consume the iterator chunk by chunk instead of
        # materializing it with list(), which previously buffered the whole
        # image in memory at once.
        for _chunk in cache_tee_iter:
            pass
        return True

    def run(self):
        """Prefetch every queued image; return True only if all succeed."""
        images = self.cache.get_queued_images()
        if not images:
            LOG.debug("Nothing to prefetch.")
            return True

        num_images = len(images)
        LOG.debug("Found %d images to prefetch", num_images)

        pool = eventlet.GreenPool(num_images)
        results = pool.imap(self.fetch_image_into_cache, images)
        # Generator expression avoids building an intermediate list.
        successes = sum(1 for r in results if r is True)
        if successes != num_images:
            LOG.warn(_LW("Failed to successfully cache all "
                         "images in queue."))
            return False

        LOG.info(_LI("Successfully cached all %d images"), num_images)
        return True
""" Cleans up any invalid cache entries """ from glance.image_cache import base class Cleaner(base.CacheApp): def run(self): self.cache.clean() glance-12.0.0/glance/image_cache/pruner.py0000664000567000056710000000141612701407047021513 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Prunes the Image Cache """ from glance.image_cache import base class Pruner(base.CacheApp): def run(self): self.cache.prune() glance-12.0.0/glance/image_cache/__init__.py0000664000567000056710000002774312701407051021745 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" LRU Cache for Image Data """ import hashlib from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import units from glance.common import exception from glance.common import utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) image_cache_opts = [ cfg.StrOpt('image_cache_driver', default='sqlite', help=_('The driver to use for image cache management.')), cfg.IntOpt('image_cache_max_size', default=10 * units.Gi, # 10 GB help=_('The upper limit (the maximum size of accumulated ' 'cache in bytes) beyond which the cache pruner, if ' 'running, starts cleaning the image cache.')), cfg.IntOpt('image_cache_stall_time', default=86400, # 24 hours help=_('The amount of time to let an incomplete image remain ' 'in the cache, before the cache cleaner, if running, ' 'will remove the incomplete image.')), cfg.StrOpt('image_cache_dir', help=_('Base directory that the image cache uses.')), ] CONF = cfg.CONF CONF.register_opts(image_cache_opts) class ImageCache(object): """Provides an LRU cache for image data.""" def __init__(self): self.init_driver() def init_driver(self): """ Create the driver for the cache """ driver_name = CONF.image_cache_driver driver_module = (__name__ + '.drivers.' + driver_name + '.Driver') try: self.driver_class = importutils.import_class(driver_module) LOG.info(_LI("Image cache loaded driver '%s'."), driver_name) except ImportError as import_err: LOG.warn(_LW("Image cache driver " "'%(driver_name)s' failed to load. 
" "Got error: '%(import_err)s."), {'driver_name': driver_name, 'import_err': import_err}) driver_module = __name__ + '.drivers.sqlite.Driver' LOG.info(_LI("Defaulting to SQLite driver.")) self.driver_class = importutils.import_class(driver_module) self.configure_driver() def configure_driver(self): """ Configure the driver for the cache and, if it fails to configure, fall back to using the SQLite driver which has no odd dependencies """ try: self.driver = self.driver_class() self.driver.configure() except exception.BadDriverConfiguration as config_err: driver_module = self.driver_class.__module__ LOG.warn(_LW("Image cache driver " "'%(driver_module)s' failed to configure. " "Got error: '%(config_err)s"), {'driver_module': driver_module, 'config_err': config_err}) LOG.info(_LI("Defaulting to SQLite driver.")) default_module = __name__ + '.drivers.sqlite.Driver' self.driver_class = importutils.import_class(default_module) self.driver = self.driver_class() self.driver.configure() def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return self.driver.is_cached(image_id) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ return self.driver.is_queued(image_id) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ return self.driver.get_cache_size() def get_hit_count(self, image_id): """ Return the number of hits that an image has :param image_id: Opaque image identifier """ return self.driver.get_hit_count(image_id) def get_cached_images(self): """ Returns a list of records about cached images. """ return self.driver.get_cached_images() def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images and returns the number of cached image files that were deleted. 
""" return self.driver.delete_all_cached_images() def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ self.driver.delete_cached_image(image_id) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images and returns the number of queued image files that were deleted. """ return self.driver.delete_all_queued_images() def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ self.driver.delete_queued_image(image_id) def prune(self): """ Removes all cached image files above the cache's maximum size. Returns a tuple containing the total number of cached files removed and the total size of all pruned image files. """ max_size = CONF.image_cache_max_size current_size = self.driver.get_cache_size() if max_size > current_size: LOG.debug("Image cache has free space, skipping prune...") return (0, 0) overage = current_size - max_size LOG.debug("Image cache currently %(overage)d bytes over max " "size. Starting prune to max size of %(max_size)d ", {'overage': overage, 'max_size': max_size}) total_bytes_pruned = 0 total_files_pruned = 0 entry = self.driver.get_least_recently_accessed() while entry and current_size > max_size: image_id, size = entry LOG.debug("Pruning '%(image_id)s' to free %(size)d bytes", {'image_id': image_id, 'size': size}) self.driver.delete_cached_image(image_id) total_bytes_pruned = total_bytes_pruned + size total_files_pruned = total_files_pruned + 1 current_size = current_size - size entry = self.driver.get_least_recently_accessed() LOG.debug("Pruning finished pruning. 
" "Pruned %(total_files_pruned)d and " "%(total_bytes_pruned)d.", {'total_files_pruned': total_files_pruned, 'total_bytes_pruned': total_bytes_pruned}) return total_files_pruned, total_bytes_pruned def clean(self, stall_time=None): """ Cleans up any invalid or incomplete cached images. The cache driver decides what that means... """ self.driver.clean(stall_time) def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ return self.driver.queue_image(image_id) def get_caching_iter(self, image_id, image_checksum, image_iter): """ Returns an iterator that caches the contents of an image while the image contents are read through the supplied iterator. :param image_id: Image ID :param image_checksum: checksum expected to be generated while iterating over image data :param image_iter: Iterator that will read image contents """ if not self.driver.is_cacheable(image_id): return image_iter LOG.debug("Tee'ing image '%s' into cache", image_id) return self.cache_tee_iter(image_id, image_iter, image_checksum) def cache_tee_iter(self, image_id, image_iter, image_checksum): try: current_checksum = hashlib.md5() with self.driver.open_for_write(image_id) as cache_file: for chunk in image_iter: try: cache_file.write(chunk) finally: current_checksum.update(chunk) yield chunk cache_file.flush() if (image_checksum and image_checksum != current_checksum.hexdigest()): msg = _("Checksum verification failed. Aborted " "caching of image '%s'.") % image_id raise exception.GlanceException(msg) except exception.GlanceException as e: with excutils.save_and_reraise_exception(): # image_iter has given us bad, (size_checked_iter has found a # bad length), or corrupt data (checksum is wrong). 
LOG.exception(encodeutils.exception_to_unicode(e)) except Exception as e: LOG.exception(_LE("Exception encountered while tee'ing " "image '%(image_id)s' into cache: %(error)s. " "Continuing with response.") % {'image_id': image_id, 'error': encodeutils.exception_to_unicode(e)}) # If no checksum provided continue responding even if # caching failed. for chunk in image_iter: yield chunk def cache_image_iter(self, image_id, image_iter, image_checksum=None): """ Cache an image with supplied iterator. :param image_id: Image ID :param image_file: Iterator retrieving image chunks :param image_checksum: Checksum of image :retval True if image file was cached, False otherwise """ if not self.driver.is_cacheable(image_id): return False for chunk in self.get_caching_iter(image_id, image_checksum, image_iter): pass return True def cache_image_file(self, image_id, image_file): """ Cache an image file. :param image_id: Image ID :param image_file: Image file to cache :retval True if image file was cached, False otherwise """ CHUNKSIZE = 64 * units.Mi return self.cache_image_iter(image_id, utils.chunkiter(image_file, CHUNKSIZE)) def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :note Upon successful reading of the image file, the image's hit count will be incremented. :param image_id: Image ID """ return self.driver.open_for_read(image_id) def get_image_size(self, image_id): """ Return the size of the image file for an image with supplied identifier. :param image_id: Image ID """ return self.driver.get_image_size(image_id) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. 
""" return self.driver.get_queued_images() glance-12.0.0/glance/image_cache/drivers/0000775000567000056710000000000012701407204021275 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/image_cache/drivers/sqlite.py0000664000567000056710000004041412701407047023160 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses SQLite to store information about cached images """ from __future__ import absolute_import from contextlib import contextmanager import os import sqlite3 import stat import time from eventlet import sleep from eventlet import timeout from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from glance.common import exception from glance.i18n import _, _LE, _LI, _LW from glance.image_cache.drivers import base LOG = logging.getLogger(__name__) sqlite_opts = [ cfg.StrOpt('image_cache_sqlite_db', default='cache.db', help=_('The path to the sqlite file database that will be ' 'used for image cache management.')), ] CONF = cfg.CONF CONF.register_opts(sqlite_opts) DEFAULT_SQL_CALL_TIMEOUT = 2 class SqliteConnection(sqlite3.Connection): """ SQLite DB Connection handler that plays well with eventlet, slightly modified from Swift's similar code. 
""" def __init__(self, *args, **kwargs): self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT) kwargs['timeout'] = 0 sqlite3.Connection.__init__(self, *args, **kwargs) def _timeout(self, call): with timeout.Timeout(self.timeout_seconds): while True: try: return call() except sqlite3.OperationalError as e: if 'locked' not in str(e): raise sleep(0.05) def execute(self, *args, **kwargs): return self._timeout(lambda: sqlite3.Connection.execute( self, *args, **kwargs)) def commit(self): return self._timeout(lambda: sqlite3.Connection.commit(self)) def dict_factory(cur, row): return {col[0]: row[idx] for idx, col in enumerate(cur.description)} class Driver(base.Driver): """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. """ def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ super(Driver, self).configure() # Create the SQLite database that will hold our cache attributes self.initialize_db() def initialize_db(self): db = CONF.image_cache_sqlite_db self.db_path = os.path.join(self.base_dir, db) try: conn = sqlite3.connect(self.db_path, check_same_thread=False, factory=SqliteConnection) conn.executescript(""" CREATE TABLE IF NOT EXISTS cached_images ( image_id TEXT PRIMARY KEY, last_accessed REAL DEFAULT 0.0, last_modified REAL DEFAULT 0.0, size INTEGER DEFAULT 0, hits INTEGER DEFAULT 0, checksum TEXT ); """) conn.close() except sqlite3.DatabaseError as e: msg = _("Failed to initialize the image cache database. " "Got error: %s") % e LOG.error(msg) raise exception.BadDriverConfiguration(driver_name='sqlite', reason=msg) def get_cache_size(self): """ Returns the total size in bytes of the image cache. 
""" sizes = [] for path in self.get_cache_files(self.base_dir): if path == self.db_path: continue file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. :param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 hits = 0 with self.get_db() as db: cur = db.execute("""SELECT hits FROM cached_images WHERE image_id = ?""", (image_id,)) hits = cur.fetchone()[0] return hits def get_cached_images(self): """ Returns a list of records about cached images. """ LOG.debug("Gathering cached image entries.") with self.get_db() as db: cur = db.execute("""SELECT image_id, hits, last_accessed, last_modified, size FROM cached_images ORDER BY image_id""") cur.row_factory = dict_factory return [r for r in cur] def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. 
:param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 with self.get_db() as db: for path in self.get_cache_files(self.base_dir): delete_cached_file(path) deleted += 1 db.execute("""DELETE FROM cached_images""") db.commit() return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id) with self.get_db() as db: delete_cached_file(path) db.execute("""DELETE FROM cached_images WHERE image_id = ?""", (image_id, )) db.commit() def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files = [f for f in self.get_cache_files(self.queue_dir)] for file in files: os.unlink(file) return len(files) def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') if os.path.exists(path): os.unlink(path) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. """ self.delete_invalid_files() if stall_time is None: stall_time = CONF.image_cache_stall_time now = time.time() older_than = now - stall_time self.delete_stalled_files(older_than) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. 
""" with self.get_db() as db: cur = db.execute("""SELECT image_id FROM cached_images ORDER BY last_accessed LIMIT 1""") try: image_id = cur.fetchone()[0] except TypeError: # There are no more cached images return None path = self.get_image_filepath(image_id) try: file_info = os.stat(path) size = file_info[stat.ST_SIZE] except OSError: size = 0 return image_id, size @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. :param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') def commit(): with self.get_db() as db: final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... if self.is_queued(image_id): os.unlink(self.get_image_filepath(image_id, 'queue')) filesize = os.path.getsize(final_path) now = time.time() db.execute("""INSERT INTO cached_images (image_id, last_accessed, last_modified, hits, size) VALUES (?, ?, ?, 0, ?)""", (image_id, now, now, filesize)) db.commit() def rollback(e): with self.get_db() as db: if os.path.exists(incomplete_path): invalid_path = self.get_image_filepath(image_id, 'invalid') LOG.warn(_LW("Fetch of cache file failed (%(e)s), rolling " "back by moving '%(incomplete_path)s' to " "'%(invalid_path)s'") % {'e': e, 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) os.rename(incomplete_path, invalid_path) db.execute("""DELETE FROM cached_images WHERE image_id = ?""", (image_id, )) db.commit() try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor 
commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) with open(path, 'rb') as cache_file: yield cache_file now = time.time() with self.get_db() as db: db.execute("""UPDATE cached_images SET hits = hits + 1, last_accessed = ? WHERE image_id = ?""", (now, image_id)) db.commit() @contextmanager def get_db(self): """ Returns a context manager that produces a database connection that self-closes and calls rollback if an error occurs while using the database connection """ conn = sqlite3.connect(self.db_path, check_same_thread=False, factory=SqliteConnection) conn.row_factory = sqlite3.Row conn.text_factory = str conn.execute('PRAGMA synchronous = NORMAL') conn.execute('PRAGMA count_changes = OFF') conn.execute('PRAGMA temp_store = MEMORY') try: yield conn except sqlite3.DatabaseError as e: msg = _LE("Error executing SQLite call. Got error: %s") % e LOG.error(msg) conn.rollback() finally: conn.close() def queue_image(self, image_id): """ This adds a image to be cache to the queue. If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) return False if self.is_being_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already being " "written to cache"), image_id) return False if self.is_queued(image_id): LOG.info(_LI("Not queueing image '%s'. 
Already queued."), image_id) return False path = self.get_image_filepath(image_id, 'queue') # Touch the file to add it to the queue with open(path, "w"): pass return True def delete_invalid_files(self): """ Removes any invalid cache entries """ for path in self.get_cache_files(self.invalid_dir): os.unlink(path) LOG.info(_LI("Removed invalid cache file %s"), path) def delete_stalled_files(self, older_than): """ Removes any incomplete cache entries older than a supplied modified time. :param older_than: Files written to on or before this timestamp will be deleted. """ for path in self.get_cache_files(self.incomplete_dir): if os.path.getmtime(path) < older_than: try: os.unlink(path) LOG.info(_LI("Removed stalled cache file %s"), path) except Exception as e: msg = (_LW("Failed to delete file %(path)s. " "Got error: %(e)s"), dict(path=path, e=e)) LOG.warn(msg) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. 
""" files = [f for f in self.get_cache_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def get_cache_files(self, basepath): """ Returns cache files in the supplied directory :param basepath: Directory to look in for cache files """ for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if path != self.db_path and os.path.isfile(path): yield path def delete_cached_file(path): if os.path.exists(path): LOG.debug("Deleting image cache file '%s'", path) os.unlink(path) else: LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to" " delete") % path) glance-12.0.0/glance/image_cache/drivers/__init__.py0000664000567000056710000000000012701407047023401 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/image_cache/drivers/base.py0000664000567000056710000001464612701407047022601 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Base attribute driver class """ import os.path from oslo_config import cfg from oslo_log import log as logging from glance.common import exception from glance.common import utils from glance.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF class Driver(object): def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ # Here we set up the various file-based image cache paths # that we need in order to find the files in different states # of cache management. self.set_paths() def set_paths(self): """ Creates all necessary directories under the base cache directory """ self.base_dir = CONF.image_cache_dir if self.base_dir is None: msg = _('Failed to read %s from config') % 'image_cache_dir' LOG.error(msg) driver = self.__class__.__module__ raise exception.BadDriverConfiguration(driver_name=driver, reason=msg) self.incomplete_dir = os.path.join(self.base_dir, 'incomplete') self.invalid_dir = os.path.join(self.base_dir, 'invalid') self.queue_dir = os.path.join(self.base_dir, 'queue') dirs = [self.incomplete_dir, self.invalid_dir, self.queue_dir] for path in dirs: utils.safe_mkdirs(path) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ raise NotImplementedError def get_cached_images(self): """ Returns a list of records about cached images. The list of records shall be ordered by image ID and shall look like:: [ { 'image_id': , 'hits': INTEGER, 'last_modified': ISO_TIMESTAMP, 'last_accessed': ISO_TIMESTAMP, 'size': INTEGER }, ... ] """ return NotImplementedError def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. 
:param image_id: Image ID """ raise NotImplementedError def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ raise NotImplementedError def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. :param image_id: Image ID """ raise NotImplementedError def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images and returns the number of cached image files that were deleted. """ raise NotImplementedError def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ raise NotImplementedError def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images and returns the number of queued image files that were deleted. """ raise NotImplementedError def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ raise NotImplementedError def queue_image(self, image_id): """ Puts an image identifier in a queue for caching. Return True on successful add to the queue, False otherwise... :param image_id: Image ID """ def clean(self, stall_time=None): """ Dependent on the driver, clean up and destroy any invalid or incomplete cached images """ raise NotImplementedError def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ raise NotImplementedError def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. :param image_id: Image ID """ raise NotImplementedError def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. 
:param image_id: Image ID """ raise NotImplementedError def get_image_filepath(self, image_id, cache_status='active'): """ This crafts an absolute path to a specific entry :param image_id: Image ID :param cache_status: Status of the image in the cache """ if cache_status == 'active': return os.path.join(self.base_dir, str(image_id)) return os.path.join(self.base_dir, cache_status, str(image_id)) def get_image_size(self, image_id): """ Return the size of the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) return os.path.getsize(path) def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ raise NotImplementedError glance-12.0.0/glance/image_cache/drivers/xattr.py0000664000567000056710000004107712701407047023027 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. Assumptions =========== 1. Cache data directory exists on a filesytem that updates atime on reads ('noatime' should NOT be set) 2. Cache data directory exists on a filesystem that supports xattrs. This is optional, but highly recommended since it allows us to present ops with useful information pertaining to the cache, like human readable filenames and statistics. 
3. `glance-prune` is scheduled to run as a periodic job via cron. This is needed to run the LRU prune strategy to keep the cache size within the limits set by the config file. Cache Directory Notes ===================== The image cache data directory contains the main cache path, where the active cache entries and subdirectories for handling partial downloads and errored-out cache images. The layout looks like: $image_cache_dir/ entry1 entry2 ... incomplete/ invalid/ queue/ """ from __future__ import absolute_import from contextlib import contextmanager import errno import os import stat import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import six import xattr from glance.common import exception from glance.i18n import _, _LI, _LW from glance.image_cache.drivers import base LOG = logging.getLogger(__name__) CONF = cfg.CONF class Driver(base.Driver): """ Cache driver that uses xattr file tags and requires a filesystem that has atimes set. """ def configure(self): """ Configure the driver to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadDriverConfiguration` """ # Here we set up the various file-based image cache paths # that we need in order to find the files in different states # of cache management. 
self.set_paths() # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs image_cache_dir = self.base_dir fake_image_filepath = os.path.join(image_cache_dir, 'checkme') with open(fake_image_filepath, 'wb') as fake_file: fake_file.write(b"XXX") fake_file.flush() try: set_xattr(fake_image_filepath, 'hits', '1') except IOError as e: if e.errno == errno.EOPNOTSUPP: msg = (_("The device housing the image cache directory " "%(image_cache_dir)s does not support xattr. It is" " likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the" " device housing the cache directory.") % {'image_cache_dir': image_cache_dir}) LOG.error(msg) raise exception.BadDriverConfiguration(driver_name="xattr", reason=msg) else: # Cleanup after ourselves... if os.path.exists(fake_image_filepath): os.unlink(fake_image_filepath) def get_cache_size(self): """ Returns the total size in bytes of the image cache. """ sizes = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) sizes.append(file_info[stat.ST_SIZE]) return sum(sizes) def get_hit_count(self, image_id): """ Return the number of hits that an image has. :param image_id: Opaque image identifier """ if not self.is_cached(image_id): return 0 path = self.get_image_filepath(image_id) return int(get_xattr(path, 'hits', default=0)) def get_cached_images(self): """ Returns a list of records about cached images. 
""" LOG.debug("Gathering cached image entries.") entries = [] for path in get_all_regular_files(self.base_dir): image_id = os.path.basename(path) entry = {'image_id': image_id} file_info = os.stat(path) entry['last_modified'] = file_info[stat.ST_MTIME] entry['last_accessed'] = file_info[stat.ST_ATIME] entry['size'] = file_info[stat.ST_SIZE] entry['hits'] = self.get_hit_count(image_id) entries.append(entry) entries.sort() # Order by ID return entries def is_cached(self, image_id): """ Returns True if the image with the supplied ID has its image file cached. :param image_id: Image ID """ return os.path.exists(self.get_image_filepath(image_id)) def is_cacheable(self, image_id): """ Returns True if the image with the supplied ID can have its image file cached, False otherwise. :param image_id: Image ID """ # Make sure we're not already cached or caching the image return not (self.is_cached(image_id) or self.is_being_cached(image_id)) def is_being_cached(self, image_id): """ Returns True if the image with supplied id is currently in the process of having its image file cached. :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'incomplete') return os.path.exists(path) def is_queued(self, image_id): """ Returns True if the image identifier is in our cache queue. 
""" path = self.get_image_filepath(image_id, 'queue') return os.path.exists(path) def delete_all_cached_images(self): """ Removes all cached image files and any attributes about the images """ deleted = 0 for path in get_all_regular_files(self.base_dir): delete_cached_file(path) deleted += 1 return deleted def delete_cached_image(self, image_id): """ Removes a specific cached image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id) delete_cached_file(path) def delete_all_queued_images(self): """ Removes all queued image files and any attributes about the images """ files = [f for f in get_all_regular_files(self.queue_dir)] for file in files: os.unlink(file) return len(files) def delete_queued_image(self, image_id): """ Removes a specific queued image file and any attributes about the image :param image_id: Image ID """ path = self.get_image_filepath(image_id, 'queue') if os.path.exists(path): os.unlink(path) def get_least_recently_accessed(self): """ Return a tuple containing the image_id and size of the least recently accessed cached file, or None if no cached files. """ stats = [] for path in get_all_regular_files(self.base_dir): file_info = os.stat(path) stats.append((file_info[stat.ST_ATIME], # access time file_info[stat.ST_SIZE], # size in bytes path)) # absolute path if not stats: return None stats.sort() return os.path.basename(stats[0][2]), stats[0][1] @contextmanager def open_for_write(self, image_id): """ Open a file for writing the image file for an image with supplied identifier. 
:param image_id: Image ID """ incomplete_path = self.get_image_filepath(image_id, 'incomplete') def set_attr(key, value): set_xattr(incomplete_path, key, value) def commit(): set_attr('hits', 0) final_path = self.get_image_filepath(image_id) LOG.debug("Fetch finished, moving " "'%(incomplete_path)s' to '%(final_path)s'", dict(incomplete_path=incomplete_path, final_path=final_path)) os.rename(incomplete_path, final_path) # Make sure that we "pop" the image from the queue... if self.is_queued(image_id): LOG.debug("Removing image '%s' from queue after " "caching it.", image_id) os.unlink(self.get_image_filepath(image_id, 'queue')) def rollback(e): set_attr('error', encodeutils.exception_to_unicode(e)) invalid_path = self.get_image_filepath(image_id, 'invalid') LOG.debug("Fetch of cache file failed (%(e)s), rolling back by " "moving '%(incomplete_path)s' to " "'%(invalid_path)s'", {'e': encodeutils.exception_to_unicode(e), 'incomplete_path': incomplete_path, 'invalid_path': invalid_path}) os.rename(incomplete_path, invalid_path) try: with open(incomplete_path, 'wb') as cache_file: yield cache_file except Exception as e: with excutils.save_and_reraise_exception(): rollback(e) else: commit() finally: # if the generator filling the cache file neither raises an # exception, nor completes fetching all data, neither rollback # nor commit will have been called, so the incomplete file # will persist - in that case remove it as it is unusable # example: ^c from client fetch if os.path.exists(incomplete_path): rollback('incomplete fetch') @contextmanager def open_for_read(self, image_id): """ Open and yield file for reading the image file for an image with supplied identifier. :param image_id: Image ID """ path = self.get_image_filepath(image_id) with open(path, 'rb') as cache_file: yield cache_file path = self.get_image_filepath(image_id) inc_xattr(path, 'hits', 1) def queue_image(self, image_id): """ This adds a image to be cache to the queue. 
If the image already exists in the queue or has already been cached, we return False, True otherwise :param image_id: Image ID """ if self.is_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id) return False if self.is_being_cached(image_id): LOG.info(_LI("Not queueing image '%s'. Already being " "written to cache"), image_id) return False if self.is_queued(image_id): LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id) return False path = self.get_image_filepath(image_id, 'queue') LOG.debug("Queueing image '%s'.", image_id) # Touch the file to add it to the queue with open(path, "w"): pass return True def get_queued_images(self): """ Returns a list of image IDs that are in the queue. The list should be sorted by the time the image ID was inserted into the queue. """ files = [f for f in get_all_regular_files(self.queue_dir)] items = [] for path in files: mtime = os.path.getmtime(path) items.append((mtime, os.path.basename(path))) items.sort() return [image_id for (modtime, image_id) in items] def _reap_old_files(self, dirpath, entry_type, grace=None): now = time.time() reaped = 0 for path in get_all_regular_files(dirpath): mtime = os.path.getmtime(path) age = now - mtime if not grace: LOG.debug("No grace period, reaping '%(path)s'" " immediately", {'path': path}) delete_cached_file(path) reaped += 1 elif age > grace: LOG.debug("Cache entry '%(path)s' exceeds grace period, " "(%(age)i s > %(grace)i s)", {'path': path, 'age': age, 'grace': grace}) delete_cached_file(path) reaped += 1 LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"), {'reaped': reaped, 'entry_type': entry_type}) return reaped def reap_invalid(self, grace=None): """Remove any invalid cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. 
""" return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace) def reap_stalled(self, grace=None): """Remove any stalled cache entries :param grace: Number of seconds to keep an invalid entry around for debugging purposes. If None, then delete immediately. """ return self._reap_old_files(self.incomplete_dir, 'stalled', grace=grace) def clean(self, stall_time=None): """ Delete any image files in the invalid directory and any files in the incomplete directory that are older than a configurable amount of time. """ self.reap_invalid() if stall_time is None: stall_time = CONF.image_cache_stall_time self.reap_stalled(stall_time) def get_all_regular_files(basepath): for fname in os.listdir(basepath): path = os.path.join(basepath, fname) if os.path.isfile(path): yield path def delete_cached_file(path): if os.path.exists(path): LOG.debug("Deleting image cache file '%s'", path) os.unlink(path) else: LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to" " delete") % path) def _make_namespaced_xattr_key(key, namespace='user'): """ Create a fully-qualified xattr-key by including the intended namespace. Namespacing differs among OSes[1]: FreeBSD: user, system Linux: user, system, trusted, security MacOS X: not needed Mac OS X won't break if we include a namespace qualifier, so, for simplicity, we always include it. -- [1] http://en.wikipedia.org/wiki/Extended_file_attributes """ namespaced_key = ".".join([namespace, key]) return namespaced_key def get_xattr(path, key, **kwargs): """Return the value for a particular xattr If the key doesn't not exist, or xattrs aren't supported by the file system then a KeyError will be raised, that is, unless you specify a default using kwargs. """ namespaced_key = _make_namespaced_xattr_key(key) try: return xattr.getxattr(path, namespaced_key) except IOError: if 'default' in kwargs: return kwargs['default'] else: raise def set_xattr(path, key, value): """Set the value of a specified xattr. 
If xattrs aren't supported by the file-system, we skip setting the value. """ namespaced_key = _make_namespaced_xattr_key(key) if not isinstance(value, six.binary_type): value = str(value) if six.PY3: value = value.encode('utf-8') xattr.setxattr(path, namespaced_key, value) def inc_xattr(path, key, n=1): """ Increment the value of an xattr (assuming it is an integer). BEWARE, this code *does* have a RACE CONDITION, since the read/update/write sequence is not atomic. Since the use-case for this function is collecting stats--not critical-- the benefits of simple, lock-free code out-weighs the possibility of an occasional hit not being counted. """ count = int(get_xattr(path, key)) count += n set_xattr(path, key, str(count)) glance-12.0.0/glance/image_cache/base.py0000664000567000056710000000133512701407047021112 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.image_cache import ImageCache class CacheApp(object): def __init__(self): self.cache = ImageCache() glance-12.0.0/glance/image_cache/client.py0000664000567000056710000001011612701407047021453 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_serialization import jsonutils as json from glance.common import client as base_client from glance.common import exception from glance.i18n import _ class CacheClient(base_client.BaseClient): DEFAULT_PORT = 9292 DEFAULT_DOC_ROOT = '/v1' def delete_cached_image(self, image_id): """ Delete a specified image from the cache """ self.do_request("DELETE", "/cached_images/%s" % image_id) return True def get_cached_images(self, **kwargs): """ Returns a list of images stored in the image cache. """ res = self.do_request("GET", "/cached_images") data = json.loads(res.read())['cached_images'] return data def get_queued_images(self, **kwargs): """ Returns a list of images queued for caching """ res = self.do_request("GET", "/queued_images") data = json.loads(res.read())['queued_images'] return data def delete_all_cached_images(self): """ Delete all cached images """ res = self.do_request("DELETE", "/cached_images") data = json.loads(res.read()) num_deleted = data['num_deleted'] return num_deleted def queue_image_for_caching(self, image_id): """ Queue an image for prefetching into cache """ self.do_request("PUT", "/queued_images/%s" % image_id) return True def delete_queued_image(self, image_id): """ Delete a specified image from the cache queue """ self.do_request("DELETE", "/queued_images/%s" % image_id) return True def delete_all_queued_images(self): """ Delete all queued images """ res = self.do_request("DELETE", "/queued_images") data = json.loads(res.read()) num_deleted = data['num_deleted'] return num_deleted def get_client(host, port=None, 
timeout=None, use_ssl=False, username=None, password=None, tenant=None, auth_url=None, auth_strategy=None, auth_token=None, region=None, is_silent_upload=False, insecure=False): """ Returns a new client Glance client object based on common kwargs. If an option isn't specified falls back to common environment variable defaults. """ if auth_url or os.getenv('OS_AUTH_URL'): force_strategy = 'keystone' else: force_strategy = None creds = { 'username': username or os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')), 'password': password or os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')), 'tenant': tenant or os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')), 'auth_url': auth_url or os.getenv('OS_AUTH_URL'), 'strategy': force_strategy or auth_strategy or os.getenv('OS_AUTH_STRATEGY', 'noauth'), 'region': region or os.getenv('OS_REGION_NAME'), } if creds['strategy'] == 'keystone' and not creds['auth_url']: msg = _("--os_auth_url option or OS_AUTH_URL environment variable " "required when keystone authentication strategy is enabled\n") raise exception.ClientConfigurationError(msg) return CacheClient( host=host, port=port, timeout=timeout, use_ssl=use_ssl, auth_token=auth_token or os.getenv('OS_TOKEN'), creds=creds, insecure=insecure) glance-12.0.0/glance/cmd/0000775000567000056710000000000012701407204016155 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/cmd/control.py0000664000567000056710000003306712701407047020225 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Helper script for starting/stopping/reloading Glance server programs. Thanks for some of the code, Swifties ;) """ from __future__ import print_function from __future__ import with_statement import argparse import fcntl import os import resource import signal import subprocess import sys import tempfile import time # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from oslo_config import cfg from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from glance.common import config from glance.i18n import _ CONF = cfg.CONF ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart', 'reload', 'force-reload'] ALL_SERVERS = ['api', 'registry', 'scrubber'] RELOAD_SERVERS = ['glance-api', 'glance-registry'] GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-registry', 'glance-scrubber'] MAX_DESCRIPTORS = 32768 MAX_MEMORY = 2 * units.Gi # 2 GB USAGE = """%(prog)s [options] [CONFPATH] Where is one of: all, {0} And command is one of: {1} And CONFPATH is the optional configuration file to use.""".format( ', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS)) exitcode = 0 def gated_by(predicate): def wrap(f): def wrapped_f(*args): if predicate: return f(*args) else: return None return wrapped_f return wrap def pid_files(server, pid_file): pid_files = [] if pid_file: if os.path.exists(os.path.abspath(pid_file)): pid_files = [os.path.abspath(pid_file)] else: if os.path.exists('/var/run/glance/%s.pid' % server): pid_files = ['/var/run/glance/%s.pid' % server] for pid_file in pid_files: pid = 
int(open(pid_file).read().strip()) yield pid_file, pid def do_start(verb, pid_file, server, args): if verb != 'Respawn' and pid_file == CONF.pid_file: for pid_file, pid in pid_files(server, pid_file): if os.path.exists('/proc/%s' % pid): print(_("%(serv)s appears to already be running: %(pid)s") % {'serv': server, 'pid': pid_file}) return else: print(_("Removing stale pid file %s") % pid_file) os.unlink(pid_file) try: resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY)) except ValueError: print(_('Unable to increase file descriptor limit. ' 'Running as non-root?')) os.environ['PYTHON_EGG_CACHE'] = '/tmp' def write_pid_file(pid_file, pid): with open(pid_file, 'w') as fp: fp.write('%d\n' % pid) def redirect_to_null(fds): with open(os.devnull, 'r+b') as nullfile: for desc in fds: # close fds try: os.dup2(nullfile.fileno(), desc) except OSError: pass def redirect_to_syslog(fds, server): log_cmd = 'logger' log_cmd_params = '-t "%s[%d]"' % (server, os.getpid()) process = subprocess.Popen([log_cmd, log_cmd_params], stdin=subprocess.PIPE) for desc in fds: # pipe to logger command try: os.dup2(process.stdin.fileno(), desc) except OSError: pass def redirect_stdio(server, capture_output): input = [sys.stdin.fileno()] output = [sys.stdout.fileno(), sys.stderr.fileno()] redirect_to_null(input) if capture_output: redirect_to_syslog(output, server) else: redirect_to_null(output) @gated_by(CONF.capture_output) def close_stdio_on_exec(): fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()] for desc in fds: # set close on exec flag fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC) def launch(pid_file, conf_file=None, capture_output=False, await_time=0): args = [server] if conf_file: args += ['--config-file', conf_file] msg = (_('%(verb)sing %(serv)s with %(conf)s') % {'verb': verb, 'serv': server, 'conf': conf_file}) else: msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 
'serv': server}) print(msg) close_stdio_on_exec() pid = os.fork() if pid == 0: os.setsid() redirect_stdio(server, capture_output) try: os.execlp('%s' % server, *args) except OSError as e: msg = (_('unable to launch %(serv)s. Got error: %(e)s') % {'serv': server, 'e': e}) sys.exit(msg) sys.exit(0) else: write_pid_file(pid_file, pid) await_child(pid, await_time) return pid @gated_by(CONF.await_child) def await_child(pid, await_time): bail_time = time.time() + await_time while time.time() < bail_time: reported_pid, status = os.waitpid(pid, os.WNOHANG) if reported_pid == pid: global exitcode exitcode = os.WEXITSTATUS(status) break time.sleep(0.05) conf_file = None if args and os.path.exists(args[0]): conf_file = os.path.abspath(os.path.expanduser(args[0])) return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child) def do_check_status(pid_file, server): if os.path.exists(pid_file): with open(pid_file, 'r') as pidfile: pid = pidfile.read().strip() print(_("%(serv)s (pid %(pid)s) is running...") % {'serv': server, 'pid': pid}) else: print(_("%s is stopped") % server) def get_pid_file(server, pid_file): pid_file = (os.path.abspath(pid_file) if pid_file else '/var/run/glance/%s.pid' % server) dir, file = os.path.split(pid_file) if not os.path.exists(dir): try: os.makedirs(dir) except OSError: pass if not os.access(dir, os.W_OK): fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server) msg = (_('Unable to create pid file %(pid)s. 
Running as non-root?\n' 'Falling back to a temp file, you can stop %(service)s ' 'service using:\n' ' %(file)s %(server)s stop --pid-file %(fb)s') % {'pid': pid_file, 'service': server, 'file': __file__, 'server': server, 'fb': fallback}) print(msg) pid_file = fallback return pid_file def do_reload(pid_file, server): if server not in RELOAD_SERVERS: msg = (_('Reload of %(serv)s not supported') % {'serv': server}) sys.exit(msg) pid = None if os.path.exists(pid_file): with open(pid_file, 'r') as pidfile: pid = int(pidfile.read().strip()) else: msg = (_('Server %(serv)s is stopped') % {'serv': server}) sys.exit(msg) sig = signal.SIGHUP try: print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)') % {'serv': server, 'pid': pid, 'sig': sig}) os.kill(pid, sig) except OSError: print(_("Process %d not running") % pid) def do_stop(server, args, graceful=False): if graceful and server in GRACEFUL_SHUTDOWN_SERVERS: sig = signal.SIGHUP else: sig = signal.SIGTERM did_anything = False pfiles = pid_files(server, CONF.pid_file) for pid_file, pid in pfiles: did_anything = True try: os.unlink(pid_file) except OSError: pass try: print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)') % {'serv': server, 'pid': pid, 'sig': sig}) os.kill(pid, sig) except OSError: print(_("Process %d not running") % pid) for pid_file, pid in pfiles: for _junk in range(150): # 15 seconds if not os.path.exists('/proc/%s' % pid): break time.sleep(0.1) else: print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;' ' giving up') % {'pid': pid, 'file': pid_file}) if not did_anything: print(_('%s is already stopped') % server) def add_command_parsers(subparsers): cmd_parser = argparse.ArgumentParser(add_help=False) cmd_subparsers = cmd_parser.add_subparsers(dest='command') for cmd in ALL_COMMANDS: parser = cmd_subparsers.add_parser(cmd) parser.add_argument('args', nargs=argparse.REMAINDER) for server in ALL_SERVERS: full_name = 'glance-' + server parser = subparsers.add_parser(server, 
parents=[cmd_parser]) parser.set_defaults(servers=[full_name]) parser = subparsers.add_parser(full_name, parents=[cmd_parser]) parser.set_defaults(servers=[full_name]) parser = subparsers.add_parser('all', parents=[cmd_parser]) parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS]) def main(): global exitcode opts = [ cfg.SubCommandOpt('server', title='Server types', help='Available server types', handler=add_command_parsers), cfg.StrOpt('pid-file', metavar='PATH', help='File to use as pid file. Default: ' '/var/run/glance/$server.pid.'), cfg.IntOpt('await-child', metavar='DELAY', default=0, help='Period to wait for service death ' 'in order to report exit code ' '(default is to not wait at all).'), cfg.BoolOpt('capture-output', default=False, help='Capture stdout/err in syslog ' 'instead of discarding it.'), cfg.BoolOpt('respawn', default=False, help='Restart service on unexpected death.'), ] CONF.register_cli_opts(opts) config.parse_args(usage=USAGE) @gated_by(CONF.await_child) @gated_by(CONF.respawn) def mutually_exclusive(): sys.stderr.write('--await-child and --respawn are mutually exclusive') sys.exit(1) mutually_exclusive() @gated_by(CONF.respawn) def anticipate_respawn(children): while children: pid, status = os.wait() if pid in children: (pid_file, server, args) = children.pop(pid) running = os.path.exists(pid_file) one_second_ago = time.time() - 1 bouncing = (running and os.path.getmtime(pid_file) >= one_second_ago) if running and not bouncing: args = (pid_file, server, args) new_pid = do_start('Respawn', *args) children[new_pid] = args else: rsn = 'bouncing' if bouncing else 'deliberately stopped' print(_('Suppressed respawn as %(serv)s was %(rsn)s.') % {'serv': server, 'rsn': rsn}) if CONF.server.command == 'start': children = {} for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) args = (pid_file, server, CONF.server.args) pid = do_start('Start', *args) children[pid] = args anticipate_respawn(children) if 
CONF.server.command == 'status': for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_check_status(pid_file, server) if CONF.server.command == 'stop': for server in CONF.server.servers: do_stop(server, CONF.server.args) if CONF.server.command == 'shutdown': for server in CONF.server.servers: do_stop(server, CONF.server.args, graceful=True) if CONF.server.command == 'restart': for server in CONF.server.servers: do_stop(server, CONF.server.args) for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_start('Restart', pid_file, server, CONF.server.args) if CONF.server.command in ('reload', 'force-reload'): for server in CONF.server.servers: pid_file = get_pid_file(server, CONF.pid_file) do_reload(pid_file, server) sys.exit(exitcode) glance-12.0.0/glance/cmd/manage.py0000775000567000056710000003323712701407047017777 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Glance Management Utility """ from __future__ import print_function # FIXME(sirp): When we have glance-admin we can consider merging this into it # Perhaps for consistency with Nova, we would then rename glance-admin -> # glance-manage (or the other way around) import os import sys import time # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from oslo_config import cfg from oslo_db.sqlalchemy import migration from oslo_log import log as logging from oslo_utils import encodeutils import six from glance.common import config from glance.common import exception from glance import context from glance.db import migration as db_migration from glance.db.sqlalchemy import api as db_api from glance.db.sqlalchemy import metadata from glance.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class DbCommands(object): """Class for managing the db""" def __init__(self): pass def version(self): """Print database's current migration level""" print(migration.db_version(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, db_migration.INIT_VERSION)) @args('--version', metavar='', help='Database version') def upgrade(self, version=None): """Upgrade the database's migration level""" migration.db_sync(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, version) @args('--version', metavar='', help='Database version') def downgrade(self, version=None): """Downgrade the database's migration level""" print("Warning: DB downgrade is deprecated and will be removed in N " "release. 
Users should make a full database backup of the " "production data before attempting any upgrade.", file=sys.stderr) migration.db_sync(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, version) @args('--version', metavar='', help='Database version') def version_control(self, version=None): """Place a database under migration control""" migration.db_version_control(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, version) @args('--version', metavar='', help='Database version') @args('--current_version', metavar='', help='Current Database version') def sync(self, version=None, current_version=None): """ Place a database under migration control and upgrade/downgrade it, creating first if necessary. """ if current_version not in (None, 'None'): migration.db_version_control(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, version=current_version) migration.db_sync(db_api.get_engine(), db_migration.MIGRATE_REPO_PATH, version) @args('--path', metavar='', help='Path to the directory or file ' 'where json metadata is stored') @args('--merge', action='store_true', help='Merge files with data that is in the database. By default it ' 'prefers existing data over new. This logic can be changed by ' 'combining --merge option with one of these two options: ' '--prefer_new or --overwrite.') @args('--prefer_new', action='store_true', help='Prefer new metadata over existing. Existing metadata ' 'might be overwritten. Needs to be combined with --merge ' 'option.') @args('--overwrite', action='store_true', help='Drop and rewrite metadata. 
Needs to be combined with --merge ' 'option') def load_metadefs(self, path=None, merge=False, prefer_new=False, overwrite=False): """Load metadefinition json files to database""" metadata.db_load_metadefs(db_api.get_engine(), path, merge, prefer_new, overwrite) def unload_metadefs(self): """Unload metadefinitions from database""" metadata.db_unload_metadefs(db_api.get_engine()) @args('--path', metavar='', help='Path to the directory where ' 'json metadata files should be ' 'saved.') def export_metadefs(self, path=None): """Export metadefinitions data from database to files""" metadata.db_export_metadefs(db_api.get_engine(), path) @args('--age_in_days', type=int, help='Purge deleted rows older than age in days') @args('--max_rows', type=int, help='Limit number of records to delete') def purge(self, age_in_days=30, max_rows=100): """Purge deleted rows older than a given age from glance tables.""" age_in_days = int(age_in_days) max_rows = int(max_rows) if age_in_days <= 0: sys.exit(_("Must supply a positive, non-zero value for age.")) if age_in_days >= (int(time.time()) / 86400): sys.exit(_("Maximal age is count of days since epoch.")) if max_rows < 1: sys.exit(_("Minimal rows limit is 1.")) ctx = context.get_admin_context(show_deleted=True) db_api.purge_deleted_rows(ctx, age_in_days, max_rows) class DbLegacyCommands(object): """Class for managing the db using legacy commands""" def __init__(self, command_object): self.command_object = command_object def version(self): self.command_object.version() def upgrade(self, version=None): self.command_object.upgrade(CONF.command.version) def downgrade(self, version=None): print("Warning: DB downgrade is deprecated and will be removed in N " "release. 
Users should make a full database backup of the " "production data before attempting any upgrade.", file=sys.stderr) self.command_object.downgrade(CONF.command.version) def version_control(self, version=None): self.command_object.version_control(CONF.command.version) def sync(self, version=None, current_version=None): self.command_object.sync(CONF.command.version, CONF.command.current_version) def load_metadefs(self, path=None, merge=False, prefer_new=False, overwrite=False): self.command_object.load_metadefs(CONF.command.path, CONF.command.merge, CONF.command.prefer_new, CONF.command.overwrite) def unload_metadefs(self): self.command_object.unload_metadefs() def export_metadefs(self, path=None): self.command_object.export_metadefs(CONF.command.path) def add_legacy_command_parsers(command_object, subparsers): legacy_command_object = DbLegacyCommands(command_object) parser = subparsers.add_parser('db_version') parser.set_defaults(action_fn=legacy_command_object.version) parser.set_defaults(action='db_version') parser = subparsers.add_parser('db_upgrade') parser.set_defaults(action_fn=legacy_command_object.upgrade) parser.add_argument('version', nargs='?') parser.set_defaults(action='db_upgrade') parser = subparsers.add_parser('db_downgrade') parser.set_defaults(action_fn=legacy_command_object.downgrade) parser.add_argument('version') parser.set_defaults(action='db_downgrade') parser = subparsers.add_parser('db_version_control') parser.set_defaults(action_fn=legacy_command_object.version_control) parser.add_argument('version', nargs='?') parser.set_defaults(action='db_version_control') parser = subparsers.add_parser('db_sync') parser.set_defaults(action_fn=legacy_command_object.sync) parser.add_argument('version', nargs='?') parser.add_argument('current_version', nargs='?') parser.set_defaults(action='db_sync') parser = subparsers.add_parser('db_load_metadefs') parser.set_defaults(action_fn=legacy_command_object.load_metadefs) parser.add_argument('path', nargs='?') 
parser.add_argument('merge', nargs='?') parser.add_argument('prefer_new', nargs='?') parser.add_argument('overwrite', nargs='?') parser.set_defaults(action='db_load_metadefs') parser = subparsers.add_parser('db_unload_metadefs') parser.set_defaults(action_fn=legacy_command_object.unload_metadefs) parser.set_defaults(action='db_unload_metadefs') parser = subparsers.add_parser('db_export_metadefs') parser.set_defaults(action_fn=legacy_command_object.export_metadefs) parser.add_argument('path', nargs='?') parser.set_defaults(action='db_export_metadefs') def add_command_parsers(subparsers): command_object = DbCommands() parser = subparsers.add_parser('db') parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): # FIXME(basha): hack to assume dest is the arg name without # the leading hyphens if no dest is supplied kwargs.setdefault('dest', args[0][2:]) if kwargs['dest'].startswith('action_kwarg_'): action_kwargs.append( kwargs['dest'][len('action_kwarg_'):]) else: action_kwargs.append(kwargs['dest']) kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) parser.add_argument('action_args', nargs='*') add_legacy_command_parsers(command_object, subparsers) command_opt = cfg.SubCommandOpt('command', title='Commands', help='Available commands', handler=add_command_parsers) CATEGORIES = { 'db': DbCommands, } def methods_of(obj): """Get all callable methods of an object that don't start with underscore returns a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def main(): 
CONF.register_cli_opt(command_opt) if len(sys.argv) < 2: script_name = sys.argv[0] print("%s category action []" % script_name) print(_("Available categories:")) for category in CATEGORIES: print(_("\t%s") % category) sys.exit(2) try: logging.register_options(CONF) cfg_files = cfg.find_config_files(project='glance', prog='glance-registry') cfg_files.extend(cfg.find_config_files(project='glance', prog='glance-api')) cfg_files.extend(cfg.find_config_files(project='glance', prog='glance-manage')) config.parse_args(default_config_files=cfg_files) config.set_config_defaults() logging.setup(CONF, 'glance') except RuntimeError as e: sys.exit("ERROR: %s" % e) try: if CONF.command.action.startswith('db'): return CONF.command.action_fn() else: func_kwargs = {} for k in CONF.command.action_kwargs: v = getattr(CONF.command, 'action_kwarg_' + k) if v is None: continue if isinstance(v, six.string_types): v = encodeutils.safe_decode(v) func_kwargs[k] = v func_args = [encodeutils.safe_decode(arg) for arg in CONF.command.action_args] return CONF.command.action_fn(*func_args, **func_kwargs) except exception.GlanceException as e: sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/__init__.py0000664000567000056710000000364512701407047020303 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys import oslo_utils.strutils as strutils from glance import i18n try: import dns # NOQA except ImportError: dnspython_installed = False else: dnspython_installed = True def fix_greendns_ipv6(): if dnspython_installed: # All of this is because if dnspython is present in your environment # then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here # is that the magic environment variable was set when the import # happened. nogreendns = 'EVENTLET_NO_GREENDNS' flag = os.environ.get(nogreendns, '') if 'eventlet' in sys.modules and not strutils.bool_from_string(flag): msg = i18n._("It appears that the eventlet module has been " "imported prior to setting %s='yes'. It is currently " "necessary to disable eventlet.greendns " "if using ipv6 since eventlet.greendns currently " "breaks with ipv6 addresses. Please ensure that " "eventlet is not imported prior to this being set.") raise ImportError(msg % nogreendns) os.environ[nogreendns] = 'yes' i18n.enable_lazy() fix_greendns_ipv6() glance-12.0.0/glance/cmd/registry.py0000775000567000056710000000543612701407047020417 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Reference implementation server for Glance Registry """ import os import sys import eventlet from oslo_utils import encodeutils # Monkey patch socket and time eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from oslo_config import cfg from oslo_log import log as logging import oslo_messaging import osprofiler.notifier import osprofiler.web from glance.common import config from glance.common import wsgi from glance import notifier CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") logging.register_options(CONF) def main(): try: config.parse_args() config.set_config_defaults() wsgi.set_eventlet_hub() logging.setup(CONF, 'glance') notifier.set_defaults() if cfg.CONF.profiler.enabled: _notifier = osprofiler.notifier.create("Messaging", oslo_messaging, {}, notifier.get_transport(), "glance", "registry", cfg.CONF.bind_host) osprofiler.notifier.set(_notifier) osprofiler.web.enable(cfg.CONF.profiler.hmac_keys) else: osprofiler.web.disable() server = wsgi.Server() server.start(config.load_paste_app('glance-registry'), default_port=9191) server.wait() except RuntimeError as e: sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/cache_pruner.py0000664000567000056710000000324712701407047021200 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Pruner This is meant to be run as a periodic task, perhaps every half-hour. """ import os import sys from oslo_log import log as logging # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import config from glance.image_cache import pruner CONF = config.CONF logging.register_options(CONF) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') app = pruner.Pruner() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) glance-12.0.0/glance/cmd/cache_cleaner.py0000664000567000056710000000407212701407047021273 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Invalid Cache Entry and Stalled Image cleaner This is meant to be run as a periodic task from cron. If something goes wrong while we're caching an image (for example the fetch times out, or an exception is raised), we create an 'invalid' entry. These entires are left around for debugging purposes. However, after some period of time, we want to clean these up. Also, if an incomplete image hangs around past the image_cache_stall_time period, we automatically sweep it up. """ import os import sys from oslo_log import log as logging # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import config from glance.image_cache import cleaner CONF = config.CONF logging.register_options(CONF) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') app = cleaner.Cleaner() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) glance-12.0.0/glance/cmd/replicator.py0000775000567000056710000006234112701407047020711 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2012 Michael Still and Canonical Inc # Copyright 2014 SoftLayer Technologies, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import os import sys from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import uuidutils import six from six.moves import http_client import six.moves.urllib.parse as urlparse from webob import exc from glance.common import config from glance.common import exception from glance.common import utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) # NOTE: positional arguments will be parsed before until # this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428 cli_opts = [ cfg.IntOpt('chunksize', short='c', default=65536, help="Amount of data to transfer per HTTP write."), cfg.StrOpt('dontreplicate', short='D', default=('created_at date deleted_at location updated_at'), help="List of fields to not replicate."), cfg.BoolOpt('metaonly', short='m', default=False, help="Only replicate metadata, not images."), cfg.StrOpt('token', short='t', default='', help=("Pass in your authentication token if you have " "one. If you use this option the same token is " "used for both the master and the slave.")), cfg.StrOpt('mastertoken', short='M', default='', help=("Pass in your authentication token if you have " "one. This is the token used for the master.")), cfg.StrOpt('slavetoken', short='S', default='', help=("Pass in your authentication token if you have " "one. 
This is the token used for the slave.")), cfg.StrOpt('command', positional=True, help="Command to be given to replicator"), cfg.ListOpt('args', positional=True, help="Arguments for the command"), ] CONF = cfg.CONF CONF.register_cli_opts(cli_opts) logging.register_options(CONF) # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) COMMANDS = """Commands: help Output help for one of the commands below compare What is missing from the slave glance? dump Dump the contents of a glance instance to local disk. livecopy Load the contents of one glance instance into another. load Load the contents of a local directory into glance. size Determine the size of a glance instance if dumped to disk. """ IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' 'the slave, but our check for it did ' 'not find it. This indicates that we ' 'do not have permissions to see all ' 'the images on the slave server.') class ImageService(object): def __init__(self, conn, auth_token): """Initialize the ImageService. conn: a http_client.HTTPConnection to the glance server auth_token: authentication token to pass in the x-auth-token header """ self.auth_token = auth_token self.conn = conn def _http_request(self, method, url, headers, body, ignore_result_body=False): """Perform an HTTP request against the server. 
method: the HTTP method to use url: the URL to request (not including server portion) headers: headers for the request body: body to send with the request ignore_result_body: the body of the result will be ignored Returns: a http_client response object """ if self.auth_token: headers.setdefault('x-auth-token', self.auth_token) LOG.debug('Request: %(method)s http://%(server)s:%(port)s' '%(url)s with headers %(headers)s', {'method': method, 'server': self.conn.host, 'port': self.conn.port, 'url': url, 'headers': repr(headers)}) self.conn.request(method, url, body, headers) response = self.conn.getresponse() headers = self._header_list_to_dict(response.getheaders()) code = response.status code_description = http_client.responses[code] LOG.debug('Response: %(code)s %(status)s %(headers)s', {'code': code, 'status': code_description, 'headers': repr(headers)}) if code == 400: raise exc.HTTPBadRequest( explanation=response.read()) if code == 500: raise exc.HTTPInternalServerError( explanation=response.read()) if code == 401: raise exc.HTTPUnauthorized( explanation=response.read()) if code == 403: raise exc.HTTPForbidden( explanation=response.read()) if code == 409: raise exc.HTTPConflict( explanation=response.read()) if ignore_result_body: # NOTE: because we are pipelining requests through a single HTTP # connection, http_client requires that we read the response body # before we can make another request. If the caller knows they # don't care about the body, they can ask us to do that for them. response.read() return response def get_images(self): """Return a detailed list of images. Yields a series of images as dicts containing metadata. 
""" params = {'is_public': None} while True: url = '/v1/images/detail' query = urlparse.urlencode(params) if query: url += '?%s' % query response = self._http_request('GET', url, {}, '') result = jsonutils.loads(response.read()) if not result or 'images' not in result or not result['images']: return for image in result.get('images', []): params['marker'] = image['id'] yield image def get_image(self, image_uuid): """Fetch image data from glance. image_uuid: the id of an image Returns: a http_client Response object where the body is the image. """ url = '/v1/images/%s' % image_uuid return self._http_request('GET', url, {}, '') @staticmethod def _header_list_to_dict(headers): """Expand a list of headers into a dictionary. headers: a list of [(key, value), (key, value), (key, value)] Returns: a dictionary representation of the list """ d = {} for (header, value) in headers: if header.startswith('x-image-meta-property-'): prop = header.replace('x-image-meta-property-', '') d.setdefault('properties', {}) d['properties'][prop] = value else: d[header.replace('x-image-meta-', '')] = value return d def get_image_meta(self, image_uuid): """Return the metadata for a single image. image_uuid: the id of an image Returns: image metadata as a dictionary """ url = '/v1/images/%s' % image_uuid response = self._http_request('HEAD', url, {}, '', ignore_result_body=True) return self._header_list_to_dict(response.getheaders()) @staticmethod def _dict_to_headers(d): """Convert a dictionary into one suitable for a HTTP request. d: a dictionary Returns: the same dictionary, with x-image-meta added to every key """ h = {} for key in d: if key == 'properties': for subkey in d[key]: if d[key][subkey] is None: h['x-image-meta-property-%s' % subkey] = '' else: h['x-image-meta-property-%s' % subkey] = d[key][subkey] else: h['x-image-meta-%s' % key] = d[key] return h def add_image(self, image_meta, image_data): """Upload an image. 
image_meta: image metadata as a dictionary image_data: image data as a object with a read() method Returns: a tuple of (http response headers, http response body) """ url = '/v1/images' headers = self._dict_to_headers(image_meta) headers['Content-Type'] = 'application/octet-stream' headers['Content-Length'] = int(image_meta['size']) response = self._http_request('POST', url, headers, image_data) headers = self._header_list_to_dict(response.getheaders()) LOG.debug('Image post done') body = response.read() return headers, body def add_image_meta(self, image_meta): """Update image metadata. image_meta: image metadata as a dictionary Returns: a tuple of (http response headers, http response body) """ url = '/v1/images/%s' % image_meta['id'] headers = self._dict_to_headers(image_meta) headers['Content-Type'] = 'application/octet-stream' response = self._http_request('PUT', url, headers, '') headers = self._header_list_to_dict(response.getheaders()) LOG.debug('Image post done') body = response.read() return headers, body def get_image_service(): """Get a copy of the image service. This is done like this to make it easier to mock out ImageService. """ return ImageService def replication_size(options, args): """%(prog)s size Determine the size of a glance instance if dumped to disk. server:port: the location of the glance instance. 
""" # Make sure server info is provided if len(args) < 1: raise TypeError(_("Too few arguments.")) server, port = utils.parse_valid_host_port(args.pop()) total_size = 0 count = 0 imageservice = get_image_service() client = imageservice(http_client.HTTPConnection(server, port), options.slavetoken) for image in client.get_images(): LOG.debug('Considering image: %(image)s', {'image': image}) if image['status'] == 'active': total_size += int(image['size']) count += 1 print(_('Total size is %(size)d bytes across %(img_count)d images') % {'size': total_size, 'img_count': count}) def replication_dump(options, args): """%(prog)s dump Dump the contents of a glance instance to local disk. server:port: the location of the glance instance. path: a directory on disk to contain the data. """ # Make sure server and path are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) path = args.pop() server, port = utils.parse_valid_host_port(args.pop()) imageservice = get_image_service() client = imageservice(http_client.HTTPConnection(server, port), options.mastertoken) for image in client.get_images(): LOG.debug('Considering: %s', image['id']) data_path = os.path.join(path, image['id']) if not os.path.exists(data_path): LOG.info(_LI('Storing: %s'), image['id']) # Dump glance information if six.PY3: f = open(data_path, 'w', encoding='utf-8') else: f = open(data_path, 'w') with f: f.write(jsonutils.dumps(image)) if image['status'] == 'active' and not options.metaonly: # Now fetch the image. The metadata returned in headers here # is the same as that which we got from the detailed images # request earlier, so we can ignore it here. Note that we also # only dump active images. LOG.debug('Image %s is active', image['id']) image_response = client.get_image(image['id']) with open(data_path + '.img', 'wb') as f: while True: chunk = image_response.read(options.chunksize) if not chunk: break f.write(chunk) def _dict_diff(a, b): """A one way dictionary diff. 
a: a dictionary b: a dictionary Returns: True if the dictionaries are different """ # Only things the master has which the slave lacks matter if set(a.keys()) - set(b.keys()): LOG.debug('metadata diff -- master has extra keys: %(keys)s', {'keys': ' '.join(set(a.keys()) - set(b.keys()))}) return True for key in a: if str(a[key]) != str(b[key]): LOG.debug('metadata diff -- value differs for key ' '%(key)s: master "%(master_value)s" vs ' 'slave "%(slave_value)s"', {'key': key, 'master_value': a[key], 'slave_value': b[key]}) return True return False def replication_load(options, args): """%(prog)s load Load the contents of a local directory into glance. server:port: the location of the glance instance. path: a directory on disk containing the data. """ # Make sure server and path are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) path = args.pop() server, port = utils.parse_valid_host_port(args.pop()) imageservice = get_image_service() client = imageservice(http_client.HTTPConnection(server, port), options.slavetoken) updated = [] for ent in os.listdir(path): if uuidutils.is_uuid_like(ent): image_uuid = ent LOG.info(_LI('Considering: %s'), image_uuid) meta_file_name = os.path.join(path, image_uuid) with open(meta_file_name) as meta_file: meta = jsonutils.loads(meta_file.read()) # Remove keys which don't make sense for replication for key in options.dontreplicate.split(' '): if key in meta: LOG.debug('Stripping %(header)s from saved ' 'metadata', {'header': key}) del meta[key] if _image_present(client, image_uuid): # NOTE(mikal): Perhaps we just need to update the metadata? # Note that we don't attempt to change an image file once it # has been uploaded. 
LOG.debug('Image %s already present', image_uuid) headers = client.get_image_meta(image_uuid) for key in options.dontreplicate.split(' '): if key in headers: LOG.debug('Stripping %(header)s from slave ' 'metadata', {'header': key}) del headers[key] if _dict_diff(meta, headers): LOG.info(_LI('Image %s metadata has changed'), image_uuid) headers, body = client.add_image_meta(meta) _check_upload_response_headers(headers, body) updated.append(meta['id']) else: if not os.path.exists(os.path.join(path, image_uuid + '.img')): LOG.debug('%s dump is missing image data, skipping', image_uuid) continue # Upload the image itself with open(os.path.join(path, image_uuid + '.img')) as img_file: try: headers, body = client.add_image(meta, img_file) _check_upload_response_headers(headers, body) updated.append(meta['id']) except exc.HTTPConflict: LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image_uuid) # noqa return updated def replication_livecopy(options, args): """%(prog)s livecopy Load the contents of one glance instance into another. fromserver:port: the location of the master glance instance. toserver:port: the location of the slave glance instance. 
""" # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) imageservice = get_image_service() slave_server, slave_port = utils.parse_valid_host_port(args.pop()) slave_conn = http_client.HTTPConnection(slave_server, slave_port) slave_client = imageservice(slave_conn, options.slavetoken) master_server, master_port = utils.parse_valid_host_port(args.pop()) master_conn = http_client.HTTPConnection(master_server, master_port) master_client = imageservice(master_conn, options.mastertoken) updated = [] for image in master_client.get_images(): LOG.debug('Considering %(id)s', {'id': image['id']}) for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from master metadata', {'header': key}) del image[key] if _image_present(slave_client, image['id']): # NOTE(mikal): Perhaps we just need to update the metadata? # Note that we don't attempt to change an image file once it # has been uploaded. headers = slave_client.get_image_meta(image['id']) if headers['status'] == 'active': for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from master ' 'metadata', {'header': key}) del image[key] if key in headers: LOG.debug('Stripping %(header)s from slave ' 'metadata', {'header': key}) del headers[key] if _dict_diff(image, headers): LOG.info(_LI('Image %s metadata has changed'), image['id']) headers, body = slave_client.add_image_meta(image) _check_upload_response_headers(headers, body) updated.append(image['id']) elif image['status'] == 'active': LOG.info(_LI('Image %s is being synced'), image['id']) if not options.metaonly: image_response = master_client.get_image(image['id']) try: headers, body = slave_client.add_image(image, image_response) _check_upload_response_headers(headers, body) updated.append(image['id']) except exc.HTTPConflict: LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa return updated def replication_compare(options, 
args): """%(prog)s compare Compare the contents of fromserver with those of toserver. fromserver:port: the location of the master glance instance. toserver:port: the location of the slave glance instance. """ # Make sure from-server and to-server are provided if len(args) < 2: raise TypeError(_("Too few arguments.")) imageservice = get_image_service() slave_server, slave_port = utils.parse_valid_host_port(args.pop()) slave_conn = http_client.HTTPConnection(slave_server, slave_port) slave_client = imageservice(slave_conn, options.slavetoken) master_server, master_port = utils.parse_valid_host_port(args.pop()) master_conn = http_client.HTTPConnection(master_server, master_port) master_client = imageservice(master_conn, options.mastertoken) differences = {} for image in master_client.get_images(): if _image_present(slave_client, image['id']): headers = slave_client.get_image_meta(image['id']) for key in options.dontreplicate.split(' '): if key in image: LOG.debug('Stripping %(header)s from master metadata', {'header': key}) del image[key] if key in headers: LOG.debug('Stripping %(header)s from slave metadata', {'header': key}) del headers[key] for key in image: if image[key] != headers.get(key, None): LOG.warn(_LW('%(image_id)s: field %(key)s differs ' '(source is %(master_value)s, destination ' 'is %(slave_value)s)') % {'image_id': image['id'], 'key': key, 'master_value': image[key], 'slave_value': headers.get(key, 'undefined')}) differences[image['id']] = 'diff' else: LOG.debug('%(image_id)s is identical', {'image_id': image['id']}) elif image['status'] == 'active': LOG.warn(_LW('Image %s entirely missing from the destination') % image['id']) differences[image['id']] = 'missing' return differences def _check_upload_response_headers(headers, body): """Check that the headers of an upload are reasonable. 
headers: the headers from the upload body: the body from the upload """ if 'status' not in headers: try: d = jsonutils.loads(body) if 'image' in d and 'status' in d['image']: return except Exception: raise exception.UploadException(body) def _image_present(client, image_uuid): """Check if an image is present in glance. client: the ImageService image_uuid: the image uuid to check Returns: True if the image is present """ headers = client.get_image_meta(image_uuid) return 'status' in headers def print_help(options, args): """Print help specific to a command. options: the parsed command line options args: the command line """ if not args: print(COMMANDS) else: command_name = args.pop() command = lookup_command(command_name) print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) def lookup_command(command_name): """Lookup a command. command_name: the command name Returns: a method which implements that command """ BASE_COMMANDS = {'help': print_help} REPLICATION_COMMANDS = {'compare': replication_compare, 'dump': replication_dump, 'livecopy': replication_livecopy, 'load': replication_load, 'size': replication_size} commands = {} for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS): commands.update(command_set) try: command = commands[command_name] except KeyError: if command_name: sys.exit(_("Unknown command: %s") % command_name) else: command = commands['help'] return command def main(): """The main function.""" try: config.parse_args() except RuntimeError as e: sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) except SystemExit as e: sys.exit("Please specify one command") # Setup logging logging.setup(CONF, 'glance') if CONF.token: CONF.slavetoken = CONF.token CONF.mastertoken = CONF.token command = lookup_command(CONF.command) try: command(CONF, CONF.args) except TypeError as e: LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) except ValueError as e: 
LOG.error(_LE(command.__doc__) % {'prog': command.__name__}) # noqa sys.exit("ERROR: %s" % encodeutils.exception_to_unicode(e)) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/cache_prefetcher.py0000775000567000056710000000344612701407047022020 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Image Cache Pre-fetcher This is meant to be run from the command line after queueing images to be pretched. """ import os import sys # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from oslo_log import log as logging from glance.common import config from glance.image_cache import prefetcher CONF = config.CONF logging.register_options(CONF) def main(): try: config.parse_cache_args() logging.setup(CONF, 'glance') glance_store.register_opts(config.CONF) glance_store.create_stores(config.CONF) glance_store.verify_default_store() app = prefetcher.Prefetcher() app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/glare.py0000775000567000056710000000512512701407047017634 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Glare (Glance Artifact Repository) API service """ import sys import eventlet from oslo_utils import encodeutils eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, thread=True, os=True) import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_messaging import osprofiler.notifier import osprofiler.web from glance.common import config from glance.common import exception from glance.common import wsgi from glance import notifier CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") logging.register_options(CONF) KNOWN_EXCEPTIONS = (RuntimeError, exception.WorkerCreationFailure, glance_store.exceptions.BadStoreConfiguration) def fail(e): global KNOWN_EXCEPTIONS return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) sys.exit(return_code) def main(): try: config.parse_args() wsgi.set_eventlet_hub() logging.setup(CONF, 'glare') if cfg.CONF.profiler.enabled: _notifier = osprofiler.notifier.create("Messaging", oslo_messaging, {}, notifier.get_transport(), "glance", "artifacts", cfg.CONF.bind_host) osprofiler.notifier.set(_notifier) else: osprofiler.web.disable() server = wsgi.Server(initialize_glance_store=True) server.start(config.load_paste_app('glare-api'), default_port=9494) server.wait() except KNOWN_EXCEPTIONS as e: fail(e) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/scrubber.py0000775000567000056710000000416012701407047020347 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance Scrub Service """ import os import sys # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import eventlet import glance_store from oslo_config import cfg from oslo_log import log as logging from glance.common import config from glance import scrubber eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, thread=True, os=True) CONF = cfg.CONF logging.register_options(CONF) def main(): CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts) CONF.register_opts(scrubber.scrubber_cmd_opts) try: config.parse_args() logging.setup(CONF, 'glance') glance_store.register_opts(config.CONF) glance_store.create_stores(config.CONF) glance_store.verify_default_store() app = scrubber.Scrubber(glance_store) if CONF.daemon: server = scrubber.Daemon(CONF.wakeup_time) server.start(app) server.wait() else: app.run() except RuntimeError as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/cache_manage.py0000775000567000056710000004037012701407047021116 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A simple cache management utility for Glance. """ from __future__ import print_function import datetime import functools import optparse import os import sys import time from oslo_utils import encodeutils import prettytable from six.moves import input # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from glance.common import exception import glance.image_cache.client from glance.version import version_info as version SUCCESS = 0 FAILURE = 1 def catch_error(action): """Decorator to provide sensible default error handling for actions.""" def wrap(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: ret = func(*args, **kwargs) return SUCCESS if ret is None else ret except exception.NotFound: options = args[0] print("Cache management middleware not enabled on host %s" % options.host) return FAILURE except exception.Forbidden: print("Not authorized to make this request.") return FAILURE except Exception as e: options = args[0] if options.debug: raise print("Failed to %s. 
Got error:" % action) pieces = encodeutils.exception_to_unicode(e).split('\n') for piece in pieces: print(piece) return FAILURE return wrapper return wrap @catch_error('show cached images') def list_cached(options, args): """%(prog)s list-cached [options] List all images currently cached. """ client = get_client(options) images = client.get_cached_images() if not images: print("No cached images.") return SUCCESS print("Found %d cached images..." % len(images)) pretty_table = prettytable.PrettyTable(("ID", "Last Accessed (UTC)", "Last Modified (UTC)", "Size", "Hits")) pretty_table.align['Size'] = "r" pretty_table.align['Hits'] = "r" for image in images: last_accessed = image['last_accessed'] if last_accessed == 0: last_accessed = "N/A" else: last_accessed = datetime.datetime.utcfromtimestamp( last_accessed).isoformat() pretty_table.add_row(( image['image_id'], last_accessed, datetime.datetime.utcfromtimestamp( image['last_modified']).isoformat(), image['size'], image['hits'])) print(pretty_table.get_string()) @catch_error('show queued images') def list_queued(options, args): """%(prog)s list-queued [options] List all images currently queued for caching. """ client = get_client(options) images = client.get_queued_images() if not images: print("No queued images.") return SUCCESS print("Found %d queued images..." % len(images)) pretty_table = prettytable.PrettyTable(("ID",)) for image in images: pretty_table.add_row((image,)) print(pretty_table.get_string()) @catch_error('queue the specified image for caching') def queue_image(options, args): """%(prog)s queue-image [options] Queues an image for caching """ if len(args) == 1: image_id = args.pop() else: print("Please specify one and only ID of the image you wish to ") print("queue from the cache as the first argument") return FAILURE if (not options.force and not user_confirm("Queue image %(image_id)s for caching?" 
% {'image_id': image_id}, default=False)): return SUCCESS client = get_client(options) client.queue_image_for_caching(image_id) if options.verbose: print("Queued image %(image_id)s for caching" % {'image_id': image_id}) return SUCCESS @catch_error('delete the specified cached image') def delete_cached_image(options, args): """ %(prog)s delete-cached-image [options] Deletes an image from the cache """ if len(args) == 1: image_id = args.pop() else: print("Please specify one and only ID of the image you wish to ") print("delete from the cache as the first argument") return FAILURE if (not options.force and not user_confirm("Delete cached image %(image_id)s?" % {'image_id': image_id}, default=False)): return SUCCESS client = get_client(options) client.delete_cached_image(image_id) if options.verbose: print("Deleted cached image %(image_id)s" % {'image_id': image_id}) return SUCCESS @catch_error('Delete all cached images') def delete_all_cached_images(options, args): """%(prog)s delete-all-cached-images [options] Remove all images from the cache. """ if (not options.force and not user_confirm("Delete all cached images?", default=False)): return SUCCESS client = get_client(options) num_deleted = client.delete_all_cached_images() if options.verbose: print("Deleted %(num_deleted)s cached images" % {'num_deleted': num_deleted}) return SUCCESS @catch_error('delete the specified queued image') def delete_queued_image(options, args): """ %(prog)s delete-queued-image [options] Deletes an image from the cache """ if len(args) == 1: image_id = args.pop() else: print("Please specify one and only ID of the image you wish to ") print("delete from the cache as the first argument") return FAILURE if (not options.force and not user_confirm("Delete queued image %(image_id)s?" 
% {'image_id': image_id}, default=False)): return SUCCESS client = get_client(options) client.delete_queued_image(image_id) if options.verbose: print("Deleted queued image %(image_id)s" % {'image_id': image_id}) return SUCCESS @catch_error('Delete all queued images') def delete_all_queued_images(options, args): """%(prog)s delete-all-queued-images [options] Remove all images from the cache queue. """ if (not options.force and not user_confirm("Delete all queued images?", default=False)): return SUCCESS client = get_client(options) num_deleted = client.delete_all_queued_images() if options.verbose: print("Deleted %(num_deleted)s queued images" % {'num_deleted': num_deleted}) return SUCCESS def get_client(options): """Return a new client object to a Glance server. specified by the --host and --port options supplied to the CLI """ return glance.image_cache.client.get_client( host=options.host, port=options.port, username=options.os_username, password=options.os_password, tenant=options.os_tenant_name, auth_url=options.os_auth_url, auth_strategy=options.os_auth_strategy, auth_token=options.os_auth_token, region=options.os_region_name, insecure=options.insecure) def env(*vars, **kwargs): """Search for the first defined of possibly many env vars. Returns the first environment variable defined in vars, or returns the default defined in kwargs. """ for v in vars: value = os.environ.get(v, None) if value: return value return kwargs.get('default', '') def create_options(parser): """Set up the CLI and config-file options that may be parsed and program commands. :param parser: The option parser """ parser.add_option('-v', '--verbose', default=False, action="store_true", help="Print more verbose output.") parser.add_option('-d', '--debug', default=False, action="store_true", help="Print debugging output.") parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0", help="Address of Glance API host. 
" "Default: %default.") parser.add_option('-p', '--port', dest="port", metavar="PORT", type=int, default=9292, help="Port the Glance API host listens on. " "Default: %default.") parser.add_option('-k', '--insecure', dest="insecure", default=False, action="store_true", help="Explicitly allow glance to perform \"insecure\" " "SSL (https) requests. The server's certificate will " "not be verified against any certificate authorities. " "This option should be used with caution.") parser.add_option('-f', '--force', dest="force", metavar="FORCE", default=False, action="store_true", help="Prevent select actions from requesting " "user confirmation.") parser.add_option('--os-auth-token', dest='os_auth_token', default=env('OS_AUTH_TOKEN'), help='Defaults to env[OS_AUTH_TOKEN].') parser.add_option('-A', '--os_auth_token', '--auth_token', dest='os_auth_token', help=optparse.SUPPRESS_HELP) parser.add_option('--os-username', dest='os_username', default=env('OS_USERNAME'), help='Defaults to env[OS_USERNAME].') parser.add_option('-I', '--os_username', dest='os_username', help=optparse.SUPPRESS_HELP) parser.add_option('--os-password', dest='os_password', default=env('OS_PASSWORD'), help='Defaults to env[OS_PASSWORD].') parser.add_option('-K', '--os_password', dest='os_password', help=optparse.SUPPRESS_HELP) parser.add_option('--os-region-name', dest='os_region_name', default=env('OS_REGION_NAME'), help='Defaults to env[OS_REGION_NAME].') parser.add_option('-R', '--os_region_name', dest='os_region_name', help=optparse.SUPPRESS_HELP) parser.add_option('--os-tenant-id', dest='os_tenant_id', default=env('OS_TENANT_ID'), help='Defaults to env[OS_TENANT_ID].') parser.add_option('--os_tenant_id', dest='os_tenant_id', help=optparse.SUPPRESS_HELP) parser.add_option('--os-tenant-name', dest='os_tenant_name', default=env('OS_TENANT_NAME'), help='Defaults to env[OS_TENANT_NAME].') parser.add_option('-T', '--os_tenant_name', dest='os_tenant_name', help=optparse.SUPPRESS_HELP) 
parser.add_option('--os-auth-url', default=env('OS_AUTH_URL'), help='Defaults to env[OS_AUTH_URL].') parser.add_option('-N', '--os_auth_url', dest='os_auth_url', help=optparse.SUPPRESS_HELP) parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy", metavar="STRATEGY", help="Authentication strategy (keystone or noauth).") def parse_options(parser, cli_args): """ Returns the parsed CLI options, command to run and its arguments, merged with any same-named options found in a configuration file :param parser: The option parser """ if not cli_args: cli_args.append('-h') # Show options in usage output... (options, args) = parser.parse_args(cli_args) # HACK(sirp): Make the parser available to the print_help method # print_help is a command, so it only accepts (options, args); we could # one-off have it take (parser, options, args), however, for now, I think # this little hack will suffice options.__parser = parser if not args: parser.print_usage() sys.exit(0) command_name = args.pop(0) command = lookup_command(parser, command_name) return (options, command, args) def print_help(options, args): """ Print help specific to a command """ parser = options.__parser if not args: parser.print_help() else: number_of_commands = len(args) if number_of_commands == 1: command_name = args.pop() command = lookup_command(parser, command_name) print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) else: sys.exit("Please specify one command") def lookup_command(parser, command_name): BASE_COMMANDS = {'help': print_help} CACHE_COMMANDS = { 'list-cached': list_cached, 'list-queued': list_queued, 'queue-image': queue_image, 'delete-cached-image': delete_cached_image, 'delete-all-cached-images': delete_all_cached_images, 'delete-queued-image': delete_queued_image, 'delete-all-queued-images': delete_all_queued_images, } commands = {} for command_set in (BASE_COMMANDS, CACHE_COMMANDS): commands.update(command_set) try: command = commands[command_name] except KeyError: 
parser.print_usage() sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name}) return command def user_confirm(prompt, default=False): """Yes/No question dialog with user. :param prompt: question/statement to present to user (string) :param default: boolean value to return if empty string is received as response to prompt """ if default: prompt_default = "[Y/n]" else: prompt_default = "[y/N]" answer = input("%s %s " % (prompt, prompt_default)) if answer == "": return default else: return answer.lower() in ("yes", "y") def main(): usage = """ %prog [options] [args] Commands: help Output help for one of the commands below list-cached List all images currently cached list-queued List all images currently queued for caching queue-image Queue an image for caching delete-cached-image Purges an image from the cache delete-all-cached-images Removes all images from the cache delete-queued-image Deletes an image from the cache queue delete-all-queued-images Deletes all images from the cache queue """ version_string = version.cached_version_string() oparser = optparse.OptionParser(version=version_string, usage=usage.strip()) create_options(oparser) (options, command, args) = parse_options(oparser, sys.argv[1:]) try: start_time = time.time() result = command(options, args) end_time = time.time() if options.verbose: print("Completed in %-0.4f sec." % (end_time - start_time)) sys.exit(result) except (RuntimeError, NotImplementedError) as e: print("ERROR: ", e) if __name__ == '__main__': main() glance-12.0.0/glance/cmd/api.py0000775000567000056710000000624412701407047017316 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Glance API Server """ import os import sys import eventlet from oslo_utils import encodeutils # Monkey patch socket, time, select, threads eventlet.patcher.monkey_patch(all=False, socket=True, time=True, select=True, thread=True, os=True) # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_messaging import osprofiler.notifier import osprofiler.web from glance.common import config from glance.common import exception from glance.common import wsgi from glance import notifier CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") logging.register_options(CONF) KNOWN_EXCEPTIONS = (RuntimeError, exception.WorkerCreationFailure, glance_store.exceptions.BadStoreConfiguration) def fail(e): global KNOWN_EXCEPTIONS return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 sys.stderr.write("ERROR: %s\n" % encodeutils.exception_to_unicode(e)) sys.exit(return_code) def main(): try: config.parse_args() config.set_config_defaults() wsgi.set_eventlet_hub() logging.setup(CONF, 'glance') notifier.set_defaults() if cfg.CONF.profiler.enabled: _notifier = osprofiler.notifier.create("Messaging", oslo_messaging, {}, notifier.get_transport(), "glance", "api", cfg.CONF.bind_host) 
osprofiler.notifier.set(_notifier) osprofiler.web.enable(cfg.CONF.profiler.hmac_keys) else: osprofiler.web.disable() server = wsgi.Server(initialize_glance_store=True) server.start(config.load_paste_app('glance-api'), default_port=9292) server.wait() except KNOWN_EXCEPTIONS as e: fail(e) if __name__ == '__main__': main() glance-12.0.0/glance/location.py0000664000567000056710000004470712701407051017610 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import copy from cryptography import exceptions as crypto_exception import debtcollector import glance_store as store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from glance.common import exception from glance.common import signature_utils from glance.common import utils import glance.domain.proxy from glance.i18n import _, _LE, _LI CONF = cfg.CONF LOG = logging.getLogger(__name__) class ImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context, store_api, store_utils): self.context = context self.store_api = store_api proxy_kwargs = {'context': context, 'store_api': store_api, 'store_utils': store_utils} super(ImageRepoProxy, self).__init__(image_repo, item_proxy_class=ImageProxy, item_proxy_kwargs=proxy_kwargs) self.db_api = glance.db.get_api() def _set_acls(self, image): public = image.visibility == 'public' member_ids = [] if image.locations and not public: member_repo = _get_member_repo_for_store(image, self.context, self.db_api, self.store_api) member_ids = [m.member_id for m in member_repo.list()] for location in image.locations: self.store_api.set_acls(location['url'], public=public, read_tenants=member_ids, context=self.context) def add(self, image): result = super(ImageRepoProxy, self).add(image) self._set_acls(image) return result def save(self, image, from_state=None): result = super(ImageRepoProxy, self).save(image, from_state=from_state) self._set_acls(image) return result def _get_member_repo_for_store(image, context, db_api, store_api): image_member_repo = glance.db.ImageMemberRepo( context, db_api, image) store_image_repo = glance.location.ImageMemberRepoProxy( image_member_repo, image, context, store_api) return store_image_repo def _check_location_uri(context, store_api, store_utils, uri): """Check if an image location is valid. 
:param context: Glance request context :param store_api: store API module :param store_utils: store utils module :param uri: location's uri string """ try: # NOTE(zhiyan): Some stores return zero when it catch exception is_ok = (store_utils.validate_external_location(uri) and store_api.get_size_from_backend(uri, context=context) > 0) except (store.UnknownScheme, store.NotFound, store.BadStoreUri): is_ok = False if not is_ok: reason = _('Invalid location') raise exception.BadStoreUri(message=reason) def _check_image_location(context, store_api, store_utils, location): _check_location_uri(context, store_api, store_utils, location['url']) store_api.check_location_metadata(location['metadata']) def _set_image_size(context, image, locations): if not image.size: for location in locations: size_from_backend = store.get_size_from_backend( location['url'], context=context) if size_from_backend: # NOTE(flwang): This assumes all locations have the same size image.size = size_from_backend break def _count_duplicated_locations(locations, new): """ To calculate the count of duplicated locations for new one. 
:param locations: The exiting image location set :param new: The new image location :returns: The count of duplicated locations """ ret = 0 for loc in locations: if loc['url'] == new['url'] and loc['metadata'] == new['metadata']: ret += 1 return ret class ImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, factory, context, store_api, store_utils): self.context = context self.store_api = store_api self.store_utils = store_utils proxy_kwargs = {'context': context, 'store_api': store_api, 'store_utils': store_utils} super(ImageFactoryProxy, self).__init__(factory, proxy_class=ImageProxy, proxy_kwargs=proxy_kwargs) def new_image(self, **kwargs): locations = kwargs.get('locations', []) for loc in locations: _check_image_location(self.context, self.store_api, self.store_utils, loc) loc['status'] = 'active' if _count_duplicated_locations(locations, loc) > 1: raise exception.DuplicateLocation(location=loc['url']) return super(ImageFactoryProxy, self).new_image(**kwargs) class StoreLocations(collections.MutableSequence): """ The proxy for store location property. It takes responsibility for: 1. Location uri correctness checking when adding a new location. 2. Remove the image data from the store when a location is removed from an image. """ def __init__(self, image_proxy, value): self.image_proxy = image_proxy if isinstance(value, list): self.value = value else: self.value = list(value) def append(self, location): # NOTE(flaper87): Insert this # location at the very end of # the value list. 
self.insert(len(self.value), location) def extend(self, other): if isinstance(other, StoreLocations): locations = other.value else: locations = list(other) for location in locations: self.append(location) def insert(self, i, location): _check_image_location(self.image_proxy.context, self.image_proxy.store_api, self.image_proxy.store_utils, location) location['status'] = 'active' if _count_duplicated_locations(self.value, location) > 0: raise exception.DuplicateLocation(location=location['url']) self.value.insert(i, location) _set_image_size(self.image_proxy.context, self.image_proxy, [location]) def pop(self, i=-1): location = self.value.pop(i) try: self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) except Exception: with excutils.save_and_reraise_exception(): self.value.insert(i, location) return location def count(self, location): return self.value.count(location) def index(self, location, *args): return self.value.index(location, *args) def remove(self, location): if self.count(location): self.pop(self.index(location)) else: self.value.remove(location) def reverse(self): self.value.reverse() # Mutable sequence, so not hashable __hash__ = None def __getitem__(self, i): return self.value.__getitem__(i) def __setitem__(self, i, location): _check_image_location(self.image_proxy.context, self.image_proxy.store_api, self.image_proxy.store_utils, location) location['status'] = 'active' self.value.__setitem__(i, location) _set_image_size(self.image_proxy.context, self.image_proxy, [location]) def __delitem__(self, i): if isinstance(i, slice): if i.step not in (None, 1): raise NotImplementedError("slice with step") self.__delslice__(i.start, i.stop) return location = None try: location = self.value[i] except Exception: del self.value[i] return self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) del 
self.value[i] def __delslice__(self, i, j): i = 0 if i is None else max(i, 0) j = len(self) if j is None else max(j, 0) locations = [] try: locations = self.value[i:j] except Exception: del self.value[i:j] return for location in locations: self.image_proxy.store_utils.delete_image_location_from_backend( self.image_proxy.context, self.image_proxy.image.image_id, location) del self.value[i] def __iadd__(self, other): self.extend(other) return self def __contains__(self, location): return location in self.value def __len__(self): return len(self.value) def __cast(self, other): if isinstance(other, StoreLocations): return other.value else: return other def __cmp__(self, other): return cmp(self.value, self.__cast(other)) def __eq__(self, other): return self.value == self.__cast(other) def __iter__(self): return iter(self.value) def __copy__(self): return type(self)(self.image_proxy, self.value) def __deepcopy__(self, memo): # NOTE(zhiyan): Only copy location entries, others can be reused. value = copy.deepcopy(self.value, memo) self.image_proxy.image.locations = value return type(self)(self.image_proxy, value) def _locations_proxy(target, attr): """ Make a location property proxy on the image object. :param target: the image object on which to add the proxy :param attr: the property proxy we want to hook """ def get_attr(self): value = getattr(getattr(self, target), attr) return StoreLocations(self, value) def set_attr(self, value): if not isinstance(value, (list, StoreLocations)): reason = _('Invalid locations') raise exception.BadStoreUri(message=reason) ori_value = getattr(getattr(self, target), attr) if ori_value != value: # NOTE(flwang): If all the URL of passed-in locations are same as # current image locations, that means user would like to only # update the metadata, not the URL. 
ordered_value = sorted([loc['url'] for loc in value]) ordered_ori = sorted([loc['url'] for loc in ori_value]) if len(ori_value) > 0 and ordered_value != ordered_ori: raise exception.Invalid(_('Original locations is not empty: ' '%s') % ori_value) # NOTE(zhiyan): Check locations are all valid # NOTE(flwang): If all the URL of passed-in locations are same as # current image locations, then it's not necessary to verify those # locations again. Otherwise, if there is any restricted scheme in # existing locations. _check_image_location will fail. if ordered_value != ordered_ori: for loc in value: _check_image_location(self.context, self.store_api, self.store_utils, loc) loc['status'] = 'active' if _count_duplicated_locations(value, loc) > 1: raise exception.DuplicateLocation(location=loc['url']) _set_image_size(self.context, getattr(self, target), value) else: for loc in value: loc['status'] = 'active' return setattr(getattr(self, target), attr, list(value)) def del_attr(self): value = getattr(getattr(self, target), attr) while len(value): self.store_utils.delete_image_location_from_backend( self.context, self.image.image_id, value[0]) del value[0] setattr(getattr(self, target), attr, value) return delattr(getattr(self, target), attr) return property(get_attr, set_attr, del_attr) class ImageProxy(glance.domain.proxy.Image): locations = _locations_proxy('image', 'locations') def __init__(self, image, context, store_api, store_utils): self.image = image self.context = context self.store_api = store_api self.store_utils = store_utils proxy_kwargs = { 'context': context, 'image': self, 'store_api': store_api, } super(ImageProxy, self).__init__( image, member_repo_proxy_class=ImageMemberRepoProxy, member_repo_proxy_kwargs=proxy_kwargs) def delete(self): self.image.delete() if self.image.locations: for location in self.image.locations: self.store_utils.delete_image_location_from_backend( self.context, self.image.image_id, location) def set_data(self, data, size=None): if size 
is None: size = 0 # NOTE(markwash): zero -> unknown size # Create the verifier for signature verification (if correct properties # are present) if (signature_utils.should_create_verifier( self.image.extra_properties)): # NOTE(bpoulos): if creating verifier fails, exception will be # raised verifier = signature_utils.get_verifier( self.context, self.image.extra_properties) else: verifier = None location, size, checksum, loc_meta = self.store_api.add_to_backend( CONF, self.image.image_id, utils.LimitingReader(utils.CooperativeReader(data), CONF.image_size_cap), size, context=self.context, verifier=verifier) self._verify_signature_if_needed(checksum) # NOTE(bpoulos): if verification fails, exception will be raised if verifier: try: verifier.verify() LOG.info(_LI("Successfully verified signature for image %s"), self.image.image_id) except crypto_exception.InvalidSignature: raise exception.SignatureVerificationError( _('Signature verification failed') ) self.image.locations = [{'url': location, 'metadata': loc_meta, 'status': 'active'}] self.image.size = size self.image.checksum = checksum self.image.status = 'active' @debtcollector.removals.remove( message="This will be removed in the N cycle.") def _verify_signature_if_needed(self, checksum): # Verify the signature (if correct properties are present) if (signature_utils.should_verify_signature( self.image.extra_properties)): # NOTE(bpoulos): if verification fails, exception will be raised result = signature_utils.verify_signature( self.context, checksum, self.image.extra_properties) if result: LOG.info(_LI("Successfully verified signature for image %s"), self.image.image_id) def get_data(self, offset=0, chunk_size=None): if not self.image.locations: # NOTE(mclaren): This is the only set of arguments # which work with this exception currently, see: # https://bugs.launchpad.net/glance-store/+bug/1501443 # When the above glance_store bug is fixed we can # add a msg as usual. 
raise store.NotFound(image=None) err = None for loc in self.image.locations: try: data, size = self.store_api.get_from_backend( loc['url'], offset=offset, chunk_size=chunk_size, context=self.context) return data except Exception as e: LOG.warn(_('Get image %(id)s data failed: ' '%(err)s.') % {'id': self.image.image_id, 'err': encodeutils.exception_to_unicode(e)}) err = e # tried all locations LOG.error(_LE('Glance tried all active locations to get data for ' 'image %s but all have failed.') % self.image.image_id) raise err class ImageMemberRepoProxy(glance.domain.proxy.Repo): def __init__(self, repo, image, context, store_api): self.repo = repo self.image = image self.context = context self.store_api = store_api super(ImageMemberRepoProxy, self).__init__(repo) def _set_acls(self): public = self.image.visibility == 'public' if self.image.locations and not public: member_ids = [m.member_id for m in self.repo.list()] for location in self.image.locations: self.store_api.set_acls(location['url'], public=public, read_tenants=member_ids, context=self.context) def add(self, member): super(ImageMemberRepoProxy, self).add(member) self._set_acls() def remove(self, member): super(ImageMemberRepoProxy, self).remove(member) self._set_acls() glance-12.0.0/glance/contrib/0000775000567000056710000000000012701407204017052 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/__init__.py0000664000567000056710000000000012701407047021156 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/0000775000567000056710000000000012701407204020533 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/__init__.py0000664000567000056710000000000012701407047022637 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/0000775000567000056710000000000012701407204023472 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v1_1/0000775000567000056710000000000012701407204024240 5ustar 
jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v1_1/__init__.py0000664000567000056710000000000012701407047026344 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v1_1/image.py0000664000567000056710000000172712701407047025710 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common.glare import definitions import glance.contrib.plugins.image_artifact.v1.image as v1 class ImageAsAnArtifact(v1.ImageAsAnArtifact): __type_version__ = '1.1' icons = definitions.BinaryObjectList() similar_images = (definitions. ArtifactReferenceList(references=definitions. 
ArtifactReference('Image'))) glance-12.0.0/glance/contrib/plugins/image_artifact/setup.cfg0000664000567000056710000000136112701407047025321 0ustar jenkinsjenkins00000000000000[metadata] name = image_artifact_plugin version = 2.0 description = An artifact plugin for Imaging functionality author = Alexander Tivelkov author-email = ativelkov@mirantis.com classifier = Development Status :: 3 - Alpha License :: OSI Approved :: Apache Software License Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.2 Programming Language :: Python :: 3.3 Intended Audience :: Developers Environment :: Console [global] setup-hooks = pbr.hooks.setup_hook [entry_points] glance.artifacts.types = Image = glance.contrib.plugins.image_artifact.version_selector:versions glance-12.0.0/glance/contrib/plugins/image_artifact/__init__.py0000664000567000056710000000000012701407047025576 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/version_selector.py0000664000567000056710000000153312701407047027440 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.contrib.plugins.image_artifact.v1 import image as v1 from glance.contrib.plugins.image_artifact.v1_1 import image as v1_1 from glance.contrib.plugins.image_artifact.v2 import image as v2 versions = [v1.ImageAsAnArtifact, v1_1.ImageAsAnArtifact, v2.ImageAsAnArtifact] glance-12.0.0/glance/contrib/plugins/image_artifact/requirements.txt0000664000567000056710000000002412701407047026757 0ustar jenkinsjenkins00000000000000python-glanceclient glance-12.0.0/glance/contrib/plugins/image_artifact/v2/0000775000567000056710000000000012701407204024021 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v2/__init__.py0000664000567000056710000000000012701407047026125 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v2/image.py0000664000567000056710000000733612701407047025473 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from glance.common import exception from glance.common.glare import definitions import glance.contrib.plugins.image_artifact.v1_1.image as v1_1 # Since this is not in the test-requirements.txt and the class below, # ImageAsAnArtifact, is pending removal a try except is added to prevent # an ImportError when module docs are generated try: import glanceclient except ImportError: glanceclient = None from glance.i18n import _ class ImageAsAnArtifact(v1_1.ImageAsAnArtifact): __type_version__ = '2.0' file = definitions.BinaryObject(required=False) legacy_image_id = definitions.String(required=False, mutable=False, pattern=R'[0-9a-f]{8}-[0-9a-f]{4}' R'-4[0-9a-f]{3}-[89ab]' R'[0-9a-f]{3}-[0-9a-f]{12}') def __pre_publish__(self, context, *args, **kwargs): super(ImageAsAnArtifact, self).__pre_publish__(*args, **kwargs) if self.file is None and self.legacy_image_id is None: raise exception.InvalidArtifactPropertyValue( message=_("Either a file or a legacy_image_id has to be " "specified") ) if self.file is not None and self.legacy_image_id is not None: raise exception.InvalidArtifactPropertyValue( message=_("Both file and legacy_image_id may not be " "specified at the same time")) if self.legacy_image_id: glance_endpoint = next(service['endpoints'][0]['publicURL'] for service in context.service_catalog if service['name'] == 'glance') # Ensure glanceclient is imported correctly since we are catching # the ImportError on initialization if glanceclient == None: raise ImportError(_("Glance client not installed")) try: client = glanceclient.Client(version=2, endpoint=glance_endpoint, token=context.auth_token) legacy_image = client.images.get(self.legacy_image_id) except Exception: raise exception.InvalidArtifactPropertyValue( message=_('Unable to get legacy image') ) if legacy_image is not None: self.file = definitions.Blob(size=legacy_image.size, locations=[ { "status": "active", "value": legacy_image.direct_url }], checksum=legacy_image.checksum, item_key=legacy_image.id) else: 
raise exception.InvalidArtifactPropertyValue( message=_("Legacy image was not found") ) glance-12.0.0/glance/contrib/plugins/image_artifact/setup.py0000664000567000056710000000145512701407047025216 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import setuptools # all other params will be taken from setup.cfg setuptools.setup(packages=setuptools.find_packages(), setup_requires=['pbr'], pbr=True) glance-12.0.0/glance/contrib/plugins/image_artifact/v1/0000775000567000056710000000000012701407204024020 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v1/__init__.py0000664000567000056710000000000012701407047026124 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/image_artifact/v1/image.py0000664000567000056710000000322412701407047025462 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from glance.common.glare import definitions class ImageAsAnArtifact(definitions.ArtifactType): __type_name__ = 'Image' __endpoint__ = 'images' file = definitions.BinaryObject(required=True) disk_format = definitions.String(allowed_values=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'], required=True, mutable=False) container_format = definitions.String(allowed_values=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'], required=True, mutable=False) min_disk = definitions.Integer(min_value=0, default=0) min_ram = definitions.Integer(min_value=0, default=0) virtual_size = definitions.Integer(min_value=0) glance-12.0.0/glance/contrib/plugins/artifacts_sample/0000775000567000056710000000000012701407204024054 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/artifacts_sample/setup.cfg0000664000567000056710000000133412701407047025703 0ustar jenkinsjenkins00000000000000[metadata] name = artifact version = 0.0.1 description = A sample plugin for artifact loading author = Inessa Vasilevskaya author-email = ivasilevskaya@mirantis.com classifier = Development Status :: 3 - Alpha License :: OSI Approved :: Apache Software License Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.2 Programming Language :: Python :: 3.3 Intended Audience :: Developers Environment :: Console [global] setup-hooks = pbr.hooks.setup_hook [entry_points] glance.artifacts.types = MyArtifact = glance.contrib.plugins.artifacts_sample:MY_ARTIFACT glance-12.0.0/glance/contrib/plugins/artifacts_sample/__init__.py0000664000567000056710000000030312701407047026166 0ustar jenkinsjenkins00000000000000from glance.contrib.plugins.artifacts_sample.v1 import artifact as art1 from glance.contrib.plugins.artifacts_sample.v2 import artifact as art2 
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact] glance-12.0.0/glance/contrib/plugins/artifacts_sample/base.py0000664000567000056710000000226112701407047025346 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common.glare import definitions class BaseArtifact(definitions.ArtifactType): __type_version__ = "1.0" prop1 = definitions.String() prop2 = definitions.Integer() int_list = definitions.Array(item_type=definitions.Integer(max_value=10, min_value=1)) depends_on = definitions.ArtifactReference(type_name='MyArtifact') references = definitions.ArtifactReferenceList() image_file = definitions.BinaryObject() screenshots = definitions.BinaryObjectList() glance-12.0.0/glance/contrib/plugins/artifacts_sample/v2/0000775000567000056710000000000012701407204024403 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/artifacts_sample/v2/__init__.py0000664000567000056710000000000012701407047026507 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/artifacts_sample/v2/artifact.py0000664000567000056710000000156212701407047026563 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.common.glare import definitions from glance.contrib.plugins.artifacts_sample import base class MyArtifact(base.BaseArtifact): __type_version__ = "2.0" depends_on = definitions.ArtifactReference(type_name="MyArtifact") glance-12.0.0/glance/contrib/plugins/artifacts_sample/setup.py0000664000567000056710000000145512701407047025600 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import setuptools # all other params will be taken from setup.cfg setuptools.setup(packages=setuptools.find_packages(), setup_requires=['pbr'], pbr=True) glance-12.0.0/glance/contrib/plugins/artifacts_sample/v1/0000775000567000056710000000000012701407204024402 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/artifacts_sample/v1/__init__.py0000664000567000056710000000000012701407047026506 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/contrib/plugins/artifacts_sample/v1/artifact.py0000664000567000056710000000140112701407047026552 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from glance.contrib.plugins.artifacts_sample import base class MyArtifact(base.BaseArtifact): __type_version__ = "1.0.1" glance-12.0.0/glance/i18n.py0000664000567000056710000000212512701407047016550 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_i18n import * # noqa _translators = TranslatorFactory(domain='glance') # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical glance-12.0.0/glance/locale/0000775000567000056710000000000012701407204016651 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ru/0000775000567000056710000000000012701407204017277 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ru/LC_MESSAGES/0000775000567000056710000000000012701407204021064 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ru/LC_MESSAGES/glance.po0000664000567000056710000043135712701407047022677 0ustar jenkinsjenkins00000000000000# Lucas Palm , 2015. #zanata # Grigory Mokhin , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev41\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 00:22+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-10 08:54+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2)\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s должен быть строкой" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s является обязательным" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s не может быть длиннее %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s не может быть короче %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s должен соответствовать шаблону %(pattern)s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "" "В последнем вызове rpc возникла исключительная ситуация %(cls)s: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s не найден в списке элементов образа %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) работает..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s уже запущен: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s зарегистрирована как модуль дважды. %(module)s не используется." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "Служба %(task_id)s типа %(task_type)s настроена неправильно. Не удалось " "загрузить хранилище в файловой системе" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "Служба %(task_id)s типа %(task_type)s настроена неправильно. 
Отсутствует " "рабочий каталог: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s на %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s %(serv)s с %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Укажите пару host:port, где host - это адрес IPv4, адрес IPv6, имя хоста " "или FQDN. При указании адреса IPv6 заключите его в квадратные скобки " "отдельно от порта (например, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s не может содержать символы в кодировке 4-байтового unicode." #, python-format msgid "%s is already stopped" msgstr "%s уже остановлен" #, python-format msgid "%s is stopped" msgstr "%s остановлен" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "" "Значение %(param)s выходит за пределы диапазона, оно не должно превышать " "%(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Опция --os_auth_url или переменная среды OS_AUTH_URL требуется, если " "включена стратегия идентификации Keystone\n" msgid "A body is not expected with this request." msgstr "В этом запросе не должно быть тела." msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "Список разрешенных артефактов в формате имя или имя-версия. Пустой список " "означает, что все артефакты могут быть загружены." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." 
msgstr "" "Объект определения метаданных с именем %(object_name)s уже существует в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Свойство определения метаданных с именем %(property_name)s уже существует в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Тип ресурса определения метаданных с именем %(resource_type_name)s уже " "существует." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Тег метаданных с именем %(name)s уже существует в пространстве имен " "%(namespace_name)s." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Набор URL для доступа к файлу образа, находящемуся во внешнем хранилище" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "Ключ AES для шифрования метаданных location хранилища. Если используется, то " "включает идентификационные данные Swift или S3. Значение должно быть " "случайной строкой длиной 16, 24 или 32 байт" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Адрес привязки сервера. Полезен при выборе конкретного сетевого интерфейса." msgid "Address to find the registry server." msgstr "Адрес для поиска сервера реестра." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Разрешить неидентифицированным пользователям доступ к API с правами только " "на чтение. Это применимо только при использовании ContextMiddleware." 
#, python-format msgid "Allowed values %s are invalid under given validators" msgstr "Разрешенные значения %s недопустимы для данных агентов проверки" msgid "Amount of disk space (in GB) required to boot image." msgstr "Объем дисковой памяти (в ГБ), необходимой для загрузки образа." msgid "Amount of ram (in MB) required to boot image." msgstr "Объем оперативной памяти (в МБ), необходимой для загрузки образа." msgid "An identifier for the image" msgstr "Идентификатор образа" msgid "An identifier for the image member (tenantId)" msgstr "Идентификатор участника образа (tenantId)" msgid "An identifier for the owner of this task" msgstr "Идентификатор владельца задачи" msgid "An identifier for the task" msgstr "Идентификатор задачи" msgid "An image file url" msgstr "url файла образа" msgid "An image schema url" msgstr "url схемы образа" msgid "An image self url" msgstr "Собственный url образа" #, python-format msgid "An image with identifier %s already exists" msgstr "Образ с идентификатором %s уже существует" msgid "An import task exception occurred" msgstr "Исключительная ситуация в задаче импорта" msgid "An object with the same identifier already exists." msgstr "Объект с таким идентификатором уже существует." msgid "An object with the same identifier is currently being operated on." msgstr "Объект с таким идентификатором занят в текущей операции." msgid "An object with the specified identifier was not found." msgstr "Объект с указанным идентификатором не найден." 
msgid "An unknown exception occurred" msgstr "Возникла неизвестная исключительная ситуация" msgid "An unknown task exception occurred" msgstr "Непредвиденная исключительная ситуация" #, python-format msgid "Array has no element at position %d" msgstr "Массив не содержит элементов в позиции %d" msgid "Array property can't have item_type=Array" msgstr "Свойство массива не может иметь item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "Не удалось удалить артефакт %s, так как он используется: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "Артефакт не может изменить состояние с %(source)s на %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "Артефакт превышает квоту хранилища: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "Артефакт не имеет параметра %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "Состояние артефакта не может быть изменено с %(curr)s на %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "Носитель хранилища артефактов заполнен: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "Тип артефакта с именем '%(name)s' и версией '%(version)s' не известен" msgid "Artifact with a circular dependency can not be created" msgstr "Нельзя создать артефакт с циклической зависимостью" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "Артефакт с id=%(id)s не доступен" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "Артефакт с id=%(id)s не найден" msgid "Artifact with the specified type, name and version already exists" msgstr "Артефакт с указанными типом, именем и версией уже существует" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "Артефакт 
с указанными типом, именем и версией уже имеет прямую зависимость=" "%(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "Артефакт с указанными типом, именем и версией уже имеет транзитивную " "зависимость=%(dep)s" msgid "Attempt to set readonly property" msgstr "Произведена попытка установить свойство, доступное только для чтения" msgid "Attempt to set value of immutable property" msgstr "Произведена попытка установить значение неизменяемого свойства" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Попытка загрузить дубликат образа: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Предпринята попытка обновить поле Расположение для образа, не находящегося в " "очереди." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Атрибут '%(property)s' предназначен только для чтения." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "Атрибут '%(property)s' зарезервирован." #, python-format msgid "Attribute '%s' is read-only." msgstr "Атрибут '%s' предназначен только для чтения." #, python-format msgid "Attribute '%s' is reserved." msgstr "Атрибут '%s' зарезервирован." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "container_format атрибута может быть заменен только для образа, находящегося " "в очереди." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "disk_format атрибута может быть заменен только для образа, находящегося в " "очереди." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" "Идентификационный ключ для идентификации пользователя службой Swift. " "(Устарело)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Служба идентификации с URL %(url)s не найдена." 
#, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Ошибка идентификации. Возможно, время действия маркера истекло во время " "загрузки файла. Данные образа для %s будут удалены." msgid "Authorization failed." msgstr "Доступ не предоставлен." msgid "Available categories:" msgstr "Доступные категории:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Недопустимый формат фильтра запроса \"%s\". Используйте нотацию DateTime ISO " "8601." #, python-format msgid "Bad Command: %s" msgstr "Неправильная команда: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Неправильный заголовок: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Фильтру %(filter)s передано неверное значение, получено %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "Неправильно сформированный URI S3: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "" "Неправильно сформированные идентификационные данные '%(creds)s' в URI Swift" msgid "Badly formed credentials in Swift URI." msgstr "Неправильно сформированные идентификационные данные в URI Swift." msgid "Base directory that the image cache uses." msgstr "Базовый каталог для кэша образов." msgid "BinaryObject property cannot be declared mutable" msgstr "Свойство BinaryObject не может быть объявлено как mutable" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Большой двоичный объект %(name)s не может иметь несколько значений" msgid "Blob size is not set" msgstr "Размер большого двоичного объекта не установлен" msgid "Body expected in request." msgstr "В запросе ожидалось тело." 
msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "Невозможно одновременно указать file и legacy_image_id " msgid "CA certificate file to use to verify connecting clients." msgstr "Файл сертификата CA для проверки подключающихся клиентов." msgid "Cannot be a negative value" msgstr "Значение не может быть отрицательным" msgid "Cannot be a negative value." msgstr "Не может быть отрицательным значением." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "Не удается преобразовать %(key)s '%(value)s' в целое число." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "Невозможно объявить свойство артефакта с зарезервированным именем 'metadata'" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "Невозможно загрузить артефакт '%(name)s'" msgid "Cannot remove last location in the image." msgstr "Нельзя удалять последнее расположение из образа." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Не удается сохранить данные для образа %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Список расположений не может быть пустым." msgid "Cannot specify 'max_size' explicitly" msgstr "Невозможно указать 'max_size' явно" msgid "Cannot specify 'min_size' explicitly" msgstr "Невозможно указать 'min_size' явно" msgid "Cannot upload to an unqueued image" msgstr "Невозможно загрузить в образ, не находящийся в очереди" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "Невозможно использовать этот параметр с оператором %(op)s" msgid "Certificate file to use when starting API server securely." msgstr "Файл сертификата для использования при защищенном запуске сервера API." 
#, python-format msgid "Certificate format not supported: %s" msgstr "Формат сертификата не поддерживается: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "Сертификат не действует после: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "Сертификат будет действовать, начиная с: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Проверка контрольной суммой не выполнена. Кэширование образа '%s' прервано." msgid "Client disconnected before sending all data to backend" msgstr "Клиент отключился, отправив не все данные в базовую систему" msgid "Command not found" msgstr "Команда не найдена" msgid "Configuration option was not valid" msgstr "Недопустимая опция конфигурации" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Ошибка соединения или неправильный запрос к службе идентификации с URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "Сформированный URL: %s" msgid "Container format is not specified." msgstr "Не указан формат контейнера." msgid "Content-Type must be application/octet-stream" msgstr "Content-Type должен быть задан в формате приложение/октет-поток" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Образ %(image_id)s скачан поврежденным" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Не удалось выполнить связывание с %(host)s:%(port)s в течение 30 секунд" msgid "Could not find OVF file in OVA archive file." msgstr "Не найден файл OVF в файле архива OVA." 
#, python-format msgid "Could not find metadata object %s" msgstr "Не найден объект метаданных %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Не удалось найти тег метаданных %s" #, python-format msgid "Could not find namespace %s" msgstr "Не найдено пространство имен %s" #, python-format msgid "Could not find property %s" msgstr "Не найдено свойство %s" msgid "Could not find required configuration option" msgstr "Обязательная опция конфигурации не найдена" #, python-format msgid "Could not find task %s" msgstr "Задача %s не найдена" #, python-format msgid "Could not update image: %s" msgstr "Не удалось изменить образ: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "В настоящее время пакеты OVA с несколькими дисками не поддерживаются." msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "Список пользовательских агентов проверки должен содержать кортежи " "'(function, message)'" #, python-format msgid "Data for image_id not found: %s" msgstr "Не найдены данные для image_id: %s" msgid "Data supplied was not valid." msgstr "Предоставленные данные недопустимы." 
msgid "Date and time of image member creation" msgstr "Дата и время создания участника образа" msgid "Date and time of image registration" msgstr "Дата и время регистрации образа" msgid "Date and time of last modification of image member" msgstr "Дата и время последней модификации участника образа" msgid "Date and time of namespace creation" msgstr "Дата и время создания пространства имен" msgid "Date and time of object creation" msgstr "Дата и время создания объекта" msgid "Date and time of resource type association" msgstr "Дата и время связывания типа ресурса" msgid "Date and time of tag creation" msgstr "Дата и время создания тега" msgid "Date and time of the last image modification" msgstr "Дата и время последнего изменения образа" msgid "Date and time of the last namespace modification" msgstr "Дата и время последнего изменения пространства имен" msgid "Date and time of the last object modification" msgstr "Дата и время последнего изменения объекта" msgid "Date and time of the last resource type association modification" msgstr "Дата и время последнего изменения связи типа ресурса" msgid "Date and time of the last tag modification" msgstr "Дата и время последнего изменения тега" msgid "Datetime when this resource was created" msgstr "Дата и время создания ресурса" msgid "Datetime when this resource was updated" msgstr "Дата и время обновления ресурса" msgid "Datetime when this resource would be subject to removal" msgstr "Дата и время планового удаления ресурса" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Количество элементов, по умолчанию возвращаемое запросом, если это значение " "не указано явно в запросе" msgid "Default value is invalid" msgstr "Неверное значение по умолчанию" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "Попытка загрузить артефакт с превышением квоты отклонена: %s" #, python-format msgid "Denying 
attempt to upload image because it exceeds the quota: %s" msgstr "Попытка загрузить образ с превышением квоты отклонена: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "Попытка загрузить образ размером более %d байт отклонена." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "Сначала необходимо удалить свойство зависимости '%s'" msgid "Dependency relations cannot be mutable" msgstr "Отношения зависимости не могут быть изменяемыми" msgid "Deploy the v1 OpenStack Images API." msgstr "Разверните API образов OpenStack версии 1." msgid "Deploy the v1 OpenStack Registry API." msgstr "Разверните API реестров OpenStack версии 1." msgid "Deploy the v2 OpenStack Images API." msgstr "Разверните API образов OpenStack версии 2." msgid "Deploy the v2 OpenStack Registry API." msgstr "Разверните API реестров OpenStack версии 2." msgid "Descriptive name for the image" msgstr "Описательное имя образа" msgid "Dictionary contains unexpected key(s)" msgstr "Словарь содержит неожиданные ключи" msgid "Dictionary size is greater than maximum" msgstr "Размер словаря больше максимального" msgid "Dictionary size is less than minimum" msgstr "Размер словаря меньше минимального" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "Алгоритм дайджеста, применяемый для цифровой подписи. Команда \"openssl list-" "message-digest-algorithms\" выводит алгоритмы, поддерживаемые версией " "OpenSSL на данной платформе. Примеры: \"sha1\", \"sha256\", \"sha512\" и т. " "д." msgid "Disk format is not specified." msgstr "Не указан формат диска." msgid "Does not match pattern" msgstr "Не соответствует шаблону" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. 
Reason: %(reason)s" msgstr "" "Драйвер %(driver_name)s не удалось правильно настроить. Причина: %(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Необходимо указать file или legacy_image_id" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Ошибка при декодировании запроса. URL или тело запроса содержат символы, " "которые Glance не способен декодировать" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Ошибка при выборке элементов образа %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Ошибка в конфигурации хранилища. Добавление артефактов в хранилище отключено." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Ошибка в конфигурации хранилища. Добавление образов в хранилище отключено." msgid "Error occurred while creating the verifier" msgstr "Ошибка при создании агента проверки" msgid "Error occurred while verifying the signature" msgstr "Ошибка при проверке подписи" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Элемент должен быть задан в формате: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Состояние должно быть указано в формате: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "Внешний источник не должен быть пустым" #, python-format msgid "External sources are not supported: '%s'" msgstr "Внешние ресурсы не поддерживаются: %s" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Активировать образ не удалось. Ошибка: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Добавить метаданные образа не удалось. 
Ошибка: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Не удалось найти артефакт %(artifact_id)s для удаления" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Найти образ для удаления %(image_id)s не удалось" #, python-format msgid "Failed to find image to delete: %s" msgstr "Найти образ для удаления не удалось: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Найти образ для обновления не удалось: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Не удалось найти тип ресурса %(resourcetype)s для удаления" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "Инициализировать базу данных кэша образов не удалось. Ошибка: %s" #, python-format msgid "Failed to read %s from config" msgstr "Прочесть %s из конфигурации не удалось" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "Зарезервировать образ не удалось. Ошибка: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "Обновить метаданные образа не удалось. Ошибка: %s" #, python-format msgid "Failed to upload image %s" msgstr "Загрузить образ %s не удалось" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Загрузить данные образа %(image_id)s не удалось из-за ошибки HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Загрузить данные образа %(image_id)s не удалось из-за внутренней ошибки: " "%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Файл %(path)s содержит недопустимый базовый файл %(bfile)s, принудительное " "завершение." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Импорты на основе файлов не разрешены. 
Используйте нелокальный источник " "данных образа." msgid "File too large" msgstr "Слишком большой файл" msgid "File too small" msgstr "Слишком маленький файл" msgid "Forbidden image access" msgstr "Доступ к образу запрещен" #, python-format msgid "Forbidden to delete a %s image." msgstr "Удалять образ %s запрещено." #, python-format msgid "Forbidden to delete image: %s" msgstr "Удалять образ запрещено: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Запрещено изменять '%(key)s' образа %(status)s." #, python-format msgid "Forbidden to modify '%s' of image." msgstr "Изменять '%s' образа запрещено." msgid "Forbidden to reserve image." msgstr "Резервировать образ запрещено." msgid "Forbidden to update deleted image." msgstr "Обновлять удаленный образ запрещено." #, python-format msgid "Forbidden to update image: %s" msgstr "Обновлять образ запрещено: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Запрещенная попытка загрузки: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Запрещенный запрос: пространство имен %s определения метаданных невидимое." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Запрос запрещается, задача %s невидима" msgid "Format of the container" msgstr "Формат контейнера" msgid "Format of the disk" msgstr "Формат диска" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "" "Получение данных большого двоичного объекта %(name)s не выполнено: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Получить данные для образа %(id)s не удалось: %(err)s." msgid "Glance client not installed" msgstr "Клиент Glance не установлен" #, python-format msgid "Host \"%s\" is not valid." msgstr "Хост \"%s\" недопустим." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Хост и порт \"%s\" недопустимы." 
msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Информационное сообщение для пользователя добавляется только в " "соответствующих случаях (обычно в случае ошибки)" msgid "If False doesn't trace SQL requests." msgstr "В случае False не трассировать запросы SQL." msgid "If False fully disable profiling feature." msgstr "В случае False полностью отключить функцию профилирования." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Если False, сервер возвращает заголовок \"Connection: close\". Если True, " "сервер возвращает \"Connection: Keep-Alive\" в своих ответах. Для того чтобы " "явно закрыть соединение сокета клиента, после того как ответ отправлен и " "успешно прочитан клиентом, необходимо просто присвоить этому параметру " "значение False при создании сервера wsgi." msgid "If true, image will not be deletable." msgstr "Если значение равно true, то образ нельзя будет удалить." msgid "If true, namespace will not be deletable." msgstr "Если true, пространство имен будет неудаляемым." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "Не удается удалить образ %(id)s, так как он используется: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "Образ %(id)s не найден" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Образ %(image_id)s не найден после загрузки. Возможно, он удален во время " "загрузки: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." 
msgstr "Образ %(image_id)s защищен и не может быть удален." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Образ %s не найден после загрузки. Возможно, он был удален во время " "передачи, выполняется очистка переданных фрагментов." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "" "Образ %s не найден после загрузки. Возможно, он удален во время загрузки." #, python-format msgid "Image %s is deactivated" msgstr "Образ %s деактивирован" #, python-format msgid "Image %s is not active" msgstr "Образ %s неактивен" #, python-format msgid "Image %s not found." msgstr "Образ %s не найден." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Размер образа превышает квоту хранилища: %s" msgid "Image id is required." msgstr "Требуется ИД образа." msgid "Image is protected" msgstr "Образ защищен" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Превышено предельно допустимое число участников для образа %(id)s: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "Имя образа слишком длинное: %d" msgid "Image operation conflicts" msgstr "Конфликт операций с образом" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Изменять состояние %(cur_status)s образа на %(new_status)s не разрешается" #, python-format msgid "Image storage media is full: %s" msgstr "Носитель образов переполнен: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Превышено предельно допустимое число тегов для образа %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Неполадка при передаче образа: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "Образ с идентификатором %s уже существует!" 
#, python-format msgid "Image with identifier %s has been deleted." msgstr "Образ с идентификатором %s удален." #, python-format msgid "Image with identifier %s not found" msgstr "Образ с идентификатором %s не найден" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Не найден образ с заданным ИД %(image_id)s" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Неправильная стратегия идентификации, ожидалось \"%(expected)s\", но " "получено \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Неправильный запрос: %s" msgid "Index is out of range" msgstr "Индекс выходит за пределы диапазона" msgid "Index is required" msgstr "Требуется индекс" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Ввод не содержит поле '%(key)s'" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "Недостаточные права для доступа к носителю артефактов: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Недостаточные права для доступа к носителю образов: %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Недопустимый Content-Type для работы с %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Недопустимый указатель JSON для этого ресурса: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Недопустимый формат сертификата: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "" "Недопустимая контрольная сумма '%s': длина не может превышать 32 символа" msgid "Invalid configuration in glance-swift conf file." msgstr "Недопустимая конфигурация в файле конфигурации glance-swift." msgid "Invalid configuration in property protection file." msgstr "Недопустимая конфигурация в файле защиты свойств." #, python-format msgid "Invalid container format '%s' for image." 
msgstr "Неверный формат контейнера '%s' для образа." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Недопустимый тип содержимого: %(content_type)s" msgid "Invalid dict property type" msgstr "Неверный тип параметра dict" msgid "Invalid dict property type specification" msgstr "Недопустимая спецификация типа параметра dict" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Неверный формат диска '%s' для образа." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Недопустимое значение фильтра %s. Нет закрывающей кавычки." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Недопустимое значение фильтра %s. Нет запятой после закрывающей кавычки." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Недопустимое значение фильтра %s. Нет запятой перед открывающей кавычкой." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "Недопустимые заголовки \"Content-Type\": %s" msgid "Invalid image id format" msgstr "Недопустимый формат ИД образа" msgid "Invalid item type specification" msgstr "Недопустимая спецификация типа элемента" #, python-format msgid "Invalid json body: %s" msgstr "Недопустимое тело json: %s" msgid "Invalid jsonpatch request" msgstr "Неверный запрос jsonpatch" msgid "Invalid location" msgstr "Недопустимое расположение" #, python-format msgid "Invalid location %s" msgstr "Неверное расположение %s" #, python-format msgid "Invalid location: %s" msgstr "Недопустимое расположение: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Неверный параметр location_strategy: %(name)s. 
Верные параметры стратегии: " "%(strategies)s" msgid "Invalid locations" msgstr "Недопустимые расположения" #, python-format msgid "Invalid locations: %s" msgstr "Недопустимые расположения: %s" msgid "Invalid marker format" msgstr "Недопустимый формат маркера" msgid "Invalid marker. Image could not be found." msgstr "Недопустимый маркер. Образ не найден." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "Недопустимый mask_gen_algorithm: %s" #, python-format msgid "Invalid membership association: %s" msgstr "Недопустимая ассоциация членства: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Недопустимое сочетание форматов диска и контейнера. При задании формата " "диска или контейнера равным 'aki', 'ari' или 'ami' форматы контейнера и " "диска должны совпадать." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Недопустимая операция: `%(op)s`. Допускается одна из следующих операций: " "%(available)s." msgid "Invalid position for adding a location." msgstr "Недопустимая позиция для добавления расположения." msgid "Invalid position for removing a location." msgstr "Недопустимая позиция для удаления расположения." msgid "Invalid property definition" msgstr "Недопустимое определение свойства" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "Недопустимый pss_salt_length: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Недопустимый тип открытого ключа для данного типа ключа подписи: %s" msgid "Invalid reference list specification" msgstr "Неверная спецификация списка указателей" msgid "Invalid referenced type" msgstr "Указан неверный тип" msgid "Invalid request PATCH for work with blob" msgstr "Недопустимый PATCH запроса для работы с объектом blob" msgid "Invalid service catalog json." 
msgstr "Недопустимый json каталога службы." #, python-format msgid "Invalid signature hash method: %s" msgstr "Недопустимый метод хэша подписи: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Недопустимый тип ключа подписи: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Недопустимое направление сортировки: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Недопустимый ключ сортировки: %(sort_key)s. Если версия типа не задана, " "допускается один из следующих: %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Недопустимый ключ сортировки %(sort_key)s. Допускается один из следующих " "ключей: %(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Недопустимый ключ сортировки: %(sort_key)s. Нельзя выполнять сортировку по " "этому свойству" #, python-format msgid "Invalid status value: %s" msgstr "Недопустимое значение состояния: %s" #, python-format msgid "Invalid status: %s" msgstr "Недопустимое состояние: %s" #, python-format msgid "Invalid time format for %s." msgstr "Недопустимый формат времени для %s." msgid "Invalid type definition" msgstr "Недопустимое определение типа" #, python-format msgid "Invalid type value: %s" msgstr "Недопустимое значение типа: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Недопустимое обновление. Оно создает пространство имен определения " "метаданных с таким же именем, как у пространства имен %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Недопустимое обновление. 
Оно создает объект определения метаданных с таким " "же именем, как у объекта %(name)s в пространстве имен %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Недопустимое обновление. Оно создает объект определения метаданных с таким " "же именем, как у объекта %(name)s в пространстве имен %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Недопустимое обновление. Оно создает пространство имен определения " "метаданных с таким же именем, как у свойства %(name)s в пространстве имен " "%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Неверное значение '%(value)s' параметра '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Недопустимое значение для опции %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Недопустимое значение видимости: %s" msgid "Is not allowed value" msgstr "Не является разрешенным значением" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Вероятно, модуль eventlet импортирован до задания %s='yes'. В данный момент " "необходимо отключить eventlet.greendns, если используется ipv6, так как " "eventlet.greendns в настоящее время не работает с адресами ipv6. Убедитесь, " "что библиотека eventlet импортируется не до настройки этих параметров." msgid "It's invalid to provide multiple image sources." 
msgstr "Указывать несколько источников образов нельзя." msgid "It's not allowed to add locations if locations are invisible." msgstr "Не разрешено добавлять расположения, если они невидимы." msgid "It's not allowed to remove locations if locations are invisible." msgstr "Не разрешено удалять расположения, если они невидимы." msgid "It's not allowed to update locations if locations are invisible." msgstr "Не разрешено обновлять расположения, если они невидимы." msgid "Items have to be unique" msgstr "Элементы должны быть уникальными" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." msgstr "" "Путь Json должен начинаться с '/' и заканчиваться не на '/'. 2 " "последовательных символа '/' не разрешены. " msgid "Legacy image was not found" msgstr "Устаревший образ не найден" msgid "Length is greater than maximum" msgstr "Длина больше максимальной" msgid "Length is less than minimum" msgstr "Длина меньше минимальной" msgid "Limit param must be an integer" msgstr "Параметр ограничения должен быть целым числом" msgid "Limit param must be positive" msgstr "Параметр Limit должен быть положительным числом" #, python-format msgid "Limit param must not be higher than %d" msgstr "Параметр ограничения не должен превышать %d" msgid "Limits request ID length." msgstr "Ограничивает длину ИД запроса." msgid "List definitions may hot have defaults" msgstr "Определения списков не могут иметь значений по умолчанию" msgid "List of strings related to the image" msgstr "Список строк, относящихся к образу" msgid "List size is greater than maximum" msgstr "Размер списка больше максимального" msgid "List size is less than minimum" msgstr "Размер списка меньше минимального" msgid "Loop time between checking for new items to schedule for delete." msgstr "" "Время цикла между проверками появления новых элементов, чье удаление " "необходимо запланировать." 
#, python-format msgid "Malformed Content-Range header: %s" msgstr "Неправильный формат заголовка Content-Range: %s" msgid "Malformed JSON in request body." msgstr "Неправильно сформированный JSON в теле запроса." msgid "Max string length may not exceed 255 characters" msgstr "Максимальная длина строки не может превышать 255 символов" msgid "Maximal age is count of days since epoch." msgstr "Максимальный возраст - число дней с начала эпохи." msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "Максимальный размер строки заголовка сообщений. Возможно, max_header_line " "потребуется увеличить при использовании больших маркеров (как правило, " "созданных API Keystone версии 3 API с большими каталогами)" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Максимальное число участников образа. Отрицательное значение определяется " "как отсутствие ограничений." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Максимальное число расположений, разрешенных в образе. Отрицательное " "значение определяется как неограниченное число." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Максимальное число свойств, разрешенных в образе. Отрицательное значение " "определяется как неограниченное число." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Максимальное число тегов, разрешенных в образе. Отрицательное значение " "определяется как отсутствие ограничений." 
msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Максимальное разрешенное количество элементов, которое может быть возвращено " "запросом" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Превышено максимальное количество перенаправлений (%(redirects)s)." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "Максимальный размер образа, который может быть загружен пользователем, " "1099511627776 байт (1 ТБ). ПРЕДУПРЕЖДЕНИЕ: это значение следует увеличивать " "только после тщательной оценки; оно не может превышать 8 ЭБ " "(9223372036854775808)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Обнаружена копия участника %(member_id)s для образа %(image_id)s" msgid "Member can't be empty" msgstr "Участник не может быть пустым" msgid "Member to be added not specified" msgstr "Добавляемый участник не указан" msgid "Membership could not be found." msgstr "Членство не найдено." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Пространство имен %(namespace)s определения метаданных защищено и не может " "быть удален." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Не найдено пространство имен определения метаданных для ИД %s" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "Пространство имен %(namespace_name)s определения метаданных не найдено." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "Объект %(object_name)s определения метаданных защищен и не может быть удален." 
#, python-format msgid "Metadata definition object not found for id=%s" msgstr "Не найден объект определения метаданных для ИД %s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "Свойство %(property_name)s определения метаданных защищено и не может быть " "удалено." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Не найдено свойство определения метаданных для ИД %s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Тип ресурса %(resource_type_name)s определения метаданных являетсясистемным " "типом и не может быть удален." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Связь типа ресурса %(resource_type)s определения метаданных защищена и не " "может быть удалена." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Тег %(tag_name)s определения метаданных защищен и не может быть удален." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Не найден тег определения метаданных для ИД %s" msgid "Min string length may not be negative" msgstr "Минимальная длина строки не может быть отрицательной" msgid "Minimal rows limit is 1." msgstr "Минимальное число строк равно 1." #, python-format msgid "Missing required credential: %(required)s" msgstr "Отсутствуют обязательные идентификационные данные: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Несколько соответствий службы 'image' для региона %(region)s. Обычно это " "означает, что регион обязателен, но вы его не указали." msgid "Must supply a positive, non-zero value for age." 
msgstr "" "В качестве возраста необходимо указать положительное число, не равное 0." msgid "Name of the paste configuration file." msgstr "Имя файла вставляемой конфигурации." #, python-format msgid "No artifact found with ID %s" msgstr "Не найдены артефакты с ИД %s" msgid "No authenticated user" msgstr "Нет идентифицированного пользователя" #, python-format msgid "No image found with ID %s" msgstr "Образ с ИД %s не найден" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Расположение с ИД %(loc)s из образа %(img)s не найдено" msgid "No permission to share that image" msgstr "Нет прав на совместное использование этого образа" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "Модуль для '%(name)s' не загружен" msgid "No property to access" msgstr "Отсутствует свойство для доступа" #, python-format msgid "No such key '%s' in a dict" msgstr "В словаре отсутствует ключ '%s'" #, python-format msgid "Not a blob property '%s'" msgstr "Не является свойством большого двоичного объекта '%s'" msgid "Not a downloadable entity" msgstr "Не является загружаемой сущностью" msgid "Not a list property" msgstr "Не является свойством списка" #, python-format msgid "Not a list property '%s'" msgstr "Не является свойством списка '%s'" msgid "Not a valid value type" msgstr "Недопустимый тип значения" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Не все зависимости находятся в состоянии '%s'" #, python-format msgid "Not allowed to create members for image %s." msgstr "Не разрешено создавать участников для образа %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Запрещено деактивировать образ в состоянии %s" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Не разрешено удалять участников для образа %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Не разрешено удалять теги для образа %s." 
#, python-format msgid "Not allowed to list members for image %s." msgstr "Не разрешено выводить список участников для образа %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Запрещено повторно активировать образ в состоянии %s" #, python-format msgid "Not allowed to update members for image %s." msgstr "Не разрешено изменять участников для образа %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Не разрешено изменять теги для образа %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "Загружать данные для образа %(image_id)s не разрешено: %(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "Не является индексом массива '%s'" msgid "Number of sort dirs does not match the number of sort keys" msgstr "Число направлений сортировки не совпадает с числом ключей сортировки" msgid "OVA extract is limited to admin" msgstr "Распаковку OVA может выполнить только администратор" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "Требуемые метаданные OVF не заданы в файле конфигурации ovf-metadata.json. " "Укажите в параметре \"cim_pasd\" список требуемых свойств " "CIM_ProcessorAllocationSettingData." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "Файл конфигурации \"ovf-metadata.json\" со свойствами OVF не найден." msgid "Old and new sorting syntax cannot be combined" msgstr "Прежний и новый синтаксисы сортировки нельзя смешивать" msgid "Only list indexes are allowed for blob lists" msgstr "Для списков больших двоичных объектов разрешены только индексы списков" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "Операции \"%s\" требуется участник с именем \"value\"." 
msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Объекты операции должны содержать в точности один участник с именем \"add\", " "\"remove\" или \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Объекты операции должны содержать только один участник с именем \"add\", " "\"remove\" или \"replace\"." msgid "Operations must be JSON objects." msgstr "Операции должны быть объектами JSON." #, python-format msgid "Operator %(op)s is not supported" msgstr "Оператор %(op)s не поддерживается" #, python-format msgid "Original locations is not empty: %s" msgstr "Исходные расположения не пусты: %s" msgid "Owner can't be updated by non admin." msgstr "Обычный пользователь не может изменить владельца." msgid "Owner must be specified to create a tag." msgstr "Для создания тега необходимо указать владельца." msgid "Owner of the image" msgstr "Владелец образа" msgid "Owner of the namespace." msgstr "Владелец пространства имен." msgid "Param values can't contain 4 byte unicode." msgstr "" "Значения параметров не могут содержать символы в кодировке 4-байтового " "unicode." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Неполное имя конвейера в файле вставляемой конфигурации, из которого удалено " "имя службы. 
Например, если имя вставляемого раздела - [pipeline:glance-api-" "keystone], то используется значение \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "Путь к каталогу, где хранятся файлы метаданных json" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "Имя модуля '%(plugin)s' должно соответствовать имени типа артефакта " "'%(name)s'" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Указатель `%s` содержит символ \"~\", не входящий в распознаваемую Esc-" "последовательность." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Указатель `%s` содержит смежный \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Указатель `%s` не содержит допустимого маркера." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Указатель `%s` не начинается с \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Указатель `%s` оканчивается на \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "Указатель содержит символ '~', который не входит в состав распознанной " "escape-последовательности [~0, ~1]." #, python-format msgid "Port \"%s\" is not valid." msgstr "Порт \"%s\" недопустим." msgid "Port the registry server is listening on." msgstr "Порт, через который сервер реестра ведет прием." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "Слишком большой числовой компонент перед выпуском (макс. %d символов)" msgid "Private key file to use when starting API server securely." msgstr "" "Файл личного ключа для использования при защищенном запуске сервера API." #, python-format msgid "Process %d not running" msgstr "Процесс %d не выполняется" #, python-format msgid "Properties %s must be set prior to saving data." 
msgstr "Свойства %s должны быть заданы до сохранения данных." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "Свойство %(property_name)s не начинается с ожидаемого префикса связи типа " "ресурса '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Свойство %s уже существует." #, python-format msgid "Property %s does not exist." msgstr "Свойство %s не существует." #, python-format msgid "Property %s may not be removed." msgstr "Свойство %s нельзя удалить." #, python-format msgid "Property %s must be set prior to saving data." msgstr "Свойство %s должно быть задано до сохранения данных." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "Свойство '%(name)s' не может иметь значение '%(val)s': %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "Свойство '%s' защищено" msgid "Property names can't contain 4 byte unicode." msgstr "" "Имена свойств не могут содержать символы в кодировке 4-байтового unicode." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "Правило защиты свойств в операции %(operation)s для правила %(rule)s не " "найдено. Пользователям ни в какой роли не будет разрешено выполнять эту " "операцию." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "Значение %(prop)s параметра не найдено" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "Указанный размер образа должен быть равен сохраненному размеру образа. 
" "(Указанный размер: %(ps)d, сохраненный размер: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Предоставленный объект не соответствует схеме '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Указано неподдерживаемое состояние задачи: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Указан неподдерживаемый тип задачи: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Описание пространства имен для пользователя." msgid "Public images do not have members." msgstr "У общедоступных образов нет участников." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "Общий url для конечной точки версий. Значение по умолчанию - None, при " "котором для заполнения базы URL будет использоваться атрибут host_url " "запроса. Если Glance расположен за proxy-сервером, то потребуется указать " "URL proxy-сервера." msgid "Python module path of data access API" msgstr "Путь модуля Python к API доступа к данным" msgid "Received invalid HTTP redirect." msgstr "Получено недопустимое перенаправление HTTP." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Перенаправляется на %(uri)s для предоставления доступа." #, python-format msgid "Registry service can't use %s" msgstr "Служба реестра не может использовать %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "Реестр настроен неправильно на сервере API. 
Причина: %(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "Отношение %(name)s может иметь несколько значений" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Перезагрузка %(serv)s не поддерживается" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Перезагрузка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s" msgid "Request body must be a JSON array of operation objects." msgstr "Тело запроса должно быть массивом JSON объектов операции." msgid "Request must be a list of commands" msgstr "Запрос должен быть списком команд" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "Отсутствуют свойства образа, необходимые для проверки подписи. Проверка " "подписи невозможна." #, python-format msgid "Required store %s is invalid" msgstr "Необходимое хранилище %s недопустимо" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "Имена типов ресурсов должны быть согласованы с типами ресурсов Heat, когда " "это возможно: http://docs.openstack.org/developer/heat/template_guide/" "openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Ответ от Keystone не содержит конечной точки Glance." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Роль, применяемая для определения идентифицированного пользователя в " "качестве администратора." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "Выполняется как продолжительный процесс. 
Когда не указан (по умолчанию), " "выполняется операция очистки один раз, затем выполняется выход. Когда " "указан, выход не выполняется и выполняется очистка с интервалом wakeup_time, " "указанным в конфигурации." msgid "Scope of image accessibility" msgstr "Область доступности образа" msgid "Scope of namespace accessibility." msgstr "Область доступности пространства имен." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Секретный ключ для подписывания сообщений трассировки служб Glance API и " "реестра Glance." #, python-format msgid "Server %(serv)s is stopped" msgstr "Сервер %(serv)s остановлен" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Создать исполнитель сервера не удалось: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Установить общесистемную квоту для каждого пользователя. Это общий объем " "памяти, доступной пользователю во всех системах хранения данных. 0 означает " "отсутствие ограничения. Дополнительно можно задать единицу измерения. " "Допустимые единицы измерения: Б (байт), КБ (килобайт), МБ (мегабайт), ГБ " "(гигабайт) и ТБ (терабайт). Единица измерения по умолчанию - Б (байт). " "Учтите, что единица измерения указывается без пробела и с учетом регистра." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "Уровень отображения %(shl)s не поддерживается в этой операции" msgid "Signature verification failed" msgstr "Проверка подписи не выполнена." 
msgid "Signature verification failed." msgstr "Проверка подписи не выполнена." msgid "Size of image file in bytes" msgstr "Размер файла образа в байтах" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Некоторые типы ресурсов допускают более одной пары ключ-значение на " "экземпляр. Например, в Cinder разрешены метаданные пользователей и образов " "для томов. Только метаданные свойств образа обрабатываются Nova " "(планирование или драйверы). Это свойство позволяет целевому объекту " "пространства имен устранить неоднозначность." msgid "Sort direction supplied was not valid." msgstr "Указано недопустимое направление сортировки." msgid "Sort key supplied was not valid." msgstr "Задан недопустимый ключ сортировки." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Задает префикс для данного типа ресурсов. Все свойства в пространстве имен " "должны иметь этот префикс при применении к указанному типу ресурсов. Должен " "использоваться разделитель префикса (например, двоеточие :)." msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "Указывает, чтобы применяемый исполнитель задачи выполнял сценарии задачи." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Состояние должно быть \"pending\", \"accepted\" или \"rejected\"." 
msgid "Status not specified" msgstr "Состояние не указано" msgid "Status of the image" msgstr "Состояние образа" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "Изменять состояние %(cur_status)s на %(new_status)s не разрешается" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Остановка %(serv)s (pid %(pid)s) с сигналом (%(sig)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "Хранилище для image_id не найдено: %s" #, python-format msgid "Store for scheme %s not found" msgstr "Хранилище для схемы %s не найдено" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "Предоставленный %(attr)s (%(supplied)s) и %(attr)s, сгенерированный из " "загруженного образа (%(actual)s), не совпадают. Образ переводится в " "состояние 'killed'." msgid "Supported values for the 'container_format' image attribute" msgstr "Поддерживаемые значения атрибута образа 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Поддерживаемые значения атрибута образа 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Повторное порождение подавлено, поскольку %(serv)s был %(rsn)s." msgid "System SIGHUP signal received." msgstr "Получен системный сигнал SIGHUP." #, python-format msgid "Task '%s' is required" msgstr "Требуется задача '%s'" msgid "Task does not exist" msgstr "Задача не существует" msgid "Task failed due to Internal Error" msgstr "Задача не выполнена из-за внутренней ошибки" msgid "Task was not configured properly" msgstr "Задача неправильно настроена" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Задача с указанным ИД %(task_id)s не найдена" msgid "The \"changes-since\" filter is no longer available on v2." 
msgstr "Фильтр \"changes-since\" больше недоступен в v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Указанный файл CA %s не существует" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "Объект образа %(image_id)s, создаваемый с помощью задачи %(task_id)s, больше " "не находится в допустимом состоянии для дальнейшей обработки." msgid "The Store URI was malformed." msgstr "URI хранилища неправильно сформирован." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "URL службы Keystone. Если \"use_user_token\" не действует и используется " "идентификация Keystone, можно указать URL Keystone." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Адрес, по которому принимает служба идентификации Swift. (Устарело)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Пароль администратора. Если \"use_user_token\" не действует, могут быть " "указаны идентификационные данные администратора." msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Имя администратора. Если \"use_user_token\" не действует, могут быть указаны " "идентификационные данные администратора." msgid "The amount of time in seconds to delay before performing a delete." msgstr "Время задержки перед выполнением удаления, в секундах." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "Время, в течение которого неполный образ может оставаться в кэше перед тем, " "как его удалит агент очистки кэша, если он запущен." 
msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "" "Базовое значение, которое будет использовано при создании приемного сокета " "TCP." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Указанный файл сертификата %s не существует" msgid "The config file that has the swift account(s)configs." msgstr "Файл конфигурации с учетными записями swift." msgid "The current status of this task" msgstr "Текущее состояние задачи" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "Устройство, на котором размещен каталог %(image_cache_dir)s кэша образов, не " "поддерживает xattr. По-видимому, вам нужно отредактировать fstab, добавив " "опцию user_xattr в соответствующую строку для устройства, на котором " "размещен каталог кэша." msgid "The driver to use for image cache management." msgstr "Драйвер для управления кэшем образов." #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "Недопустимый формат версии %s. Используйте формат semver " msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "Формат, в который будут автоматически преобразовываться образы. При работе с " "RBD должен быть задан как 'raw'" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Заданный uri недопустим. 
Укажите допустимый uri из следующего списка " "поддерживаемых uri %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "Имя хоста или IP-адрес процесса pydev, принимающего соединения отладки" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "Образ %s уже присутствует на подчиненном сервере, но наша проверка его не " "обнаружила. Это означает, что у нас нет прав на просмотр всех образов на " "подчиненном сервере." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "Чересчур большой размер входящего артефакта: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "Чересчур большой размер входящего образа: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Указанный файл ключа %s не существует" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных расположений образа. Указанное " "число: %(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных участников данного образа. " "Указанное число: %(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных свойств образа. Указанное число: " "%(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. 
" "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "Превышено ограничение по числу разрешенных свойств образа. Указанное число: " "%(num)s, максимальное число: %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Превышено ограничение по числу разрешенных тегов образа. Указанное число: " "%(attempted)s, максимальное число: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Расположение %(location)s уже существует" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Данные о расположении содержат недопустимый ИД: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "Расположение файла защиты свойств. Этот файл содержит правила для защиты " "свойств и роли/стратегии, с ними связанные. Если это значение конфигурации " "не указано, то по умолчанию защита свойств выключена. Если значение указано, " "но файл не найден, то служба не запустится glance-api." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Определение метаданных %(record_type)s с именем %(record_name)s не удалено. " "Другие записи все еще ссылаются на него." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Пространство имен %(namespace_name)s определения метаданных уже существует." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." 
msgstr "" "Объект определения метаданных с именем %(object_name)s не найден в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Свойство определения метаданных с именем %(property_name)s не найдено в " "пространстве имен %(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Связь типа ресурса определения метаданных для типа ресурса" "%(resource_type_name)s и пространства имен %(namespace_name)s уже существует." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "Связь типа ресурса определения метаданных для типа ресурса" "%(resource_type_name)s и пространства имен %(namespace_name)s не найдена." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Тип ресурса определения метаданных с именем %(resource_type_name)s не найден." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Тег определения метаданных с именем %(name)s не найден в пространстве имен " "%(namespace_name)s." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "Режим работы службы. Допустимые значения: serial, parallel." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "Количество исполнителей дочернего процесса, которые будут созданы для " "обслуживания запросов. Значение по умолчанию будет равно количеству " "доступных CPU." 
msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "Число параллельных операций, одновременно выполняемых службой. Значение " "может быть больше 1, когда режим работы службы - parallel." msgid "The parameters required by task, JSON blob" msgstr "Параметры, обязательные для задачи JSON blob" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "Путь к файлу сертификата для использования в соединениях SSL с сервером " "реестра (если таковой есть). Или можно присвоить переменной среды " "GLANCE_CLIENT_CERT_FILE путь к файлу сертификата CA" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "Путь к файлу сертификата CA для использования в соединениях SSL с сервером " "реестра (если таковой есть). Или можно присвоить переменной среды " "GLANCE_CLIENT_CA_FILE путь к файлу сертификата CA ." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "Путь к файлу ключа для использования в соединениях SSL с сервером реестра " "(если таковой есть). Или можно присвоить переменной среды " "GLANCE_CLIENT_KEY_FILE путь к файлу ключа" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "Путь к базе данных файлов sqlite, которая будет использована для управления " "кэшем образом." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. 
A value of 0 implies no timeout." msgstr "" "Время в секундах, в течение которого сервер API будет ожидать выполнения " "запроса к реестру. 0 означает отсутствие тайм-аута." msgid "The port on which a pydev process is listening for connections." msgstr "Порт, через который процесс pydev принимает соединения." msgid "The port on which the server will listen." msgstr "Порт, через который сервер будет вести прием." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "Протокол связи с сервером реестра. Либо http, либо https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "Указанное тело %(body)s неверно для данной схемы: %(schema)s" msgid "The provided image is too large." msgstr "Предоставленный образ слишком велик." #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "Указанный путь '%(path)s' неверен: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "Ссылка на параметры учетных записей и резервного хранения swift по умолчанию " "для использования при добавлении новых образов." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "Регион службы идентификации. Если \"use_user_token\" не действует и " "используется идентификация Keystone, можно указать имя региона." msgid "The request returned 500 Internal Server Error." msgstr "Запрос возвратил ошибку 500 - Внутренняя ошибка сервера." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "Запрос возвратил ошибку 503 - Служба недоступна. Как правило, это происходит " "при перегруженности службы или другом временном сбое." #, python-format msgid "" "The request returned a 302 Multiple Choices. 
This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "Запрос возвратил ошибку 302 - Множественный выбор. Как правило, это " "означает, что вы не включили индикатор версии в URI запроса.\n" "\n" "Возвращенное тело запроса:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Запрос возвратил ошибку 413 - Сущность запроса слишком велика. Как правило, " "это означает, что нарушено ограничение на скорость или порог квоты.\n" "\n" "Тело ответа:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Запрос возвратил непредвиденное состояние: %(status)s.\n" "\n" "Тело ответа:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "Запрошенный образ деактивирован. Загрузка данных образа запрещена." msgid "The result of current task, JSON blob" msgstr "Результат текущей задачи JSON blob" msgid "The signature data was not properly encoded using base64" msgstr "Данные подписи в кодировке base64 содержат ошибку" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "Объем данных %(image_size)s превышает допустимый максимум. Остаток: " "%(remaining)s байт." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "Размер пула нитей для очистки образов. Значение по умолчанию: один, что " "означает последовательную очистку. 
Любое значение, превышающее 1, означает " "максимальное число образов, которое можно очищать одновременно." #, python-format msgid "The specified member %s could not be found" msgstr "Указанный участник %s не найден" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Указанный объект метаданных %s не найден" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Не удалось найти указанный тег метаданных %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "Указанное пространство имен %s не найдено" #, python-format msgid "The specified property %s could not be found" msgstr "Указанное свойство %s не найдено" #, python-format msgid "The specified resource type %s could not be found " msgstr "Указанный тип ресурса %s не найден " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Состояние расположения удаленного образа может быть равно только " "'pending_delete' или 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Состояние расположения удаленного образа может быть равно только " "'pending_delete' или 'deleted'." msgid "The status of this image member" msgstr "Состояние этого участника образа" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Имена хранилищ данных, используемых для получения порядка параметров " "хранилищ. Имя должно быть зарегистрированным в одном из хранилищ, " "определенных в опции настройки 'stores'. Эта опция будет применена при " "использовании опции 'store_type' в качестве стратегии поиска образа, " "определенной в опции настройки 'location_strategy'." 
msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "Стратегия идентификации. Если \"use_user_token\" не действует, можно указать " "стратегию идентификации." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "Целевой участник %(member_id)s уже связан с образом %(image_id)s." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "Имя арендатора администратора. Если \"use_user_token\" не действует, можно " "указать имя арендатора администратора." msgid "The type of task represented by this content" msgstr "Тип задачи, представленной этим содержимым" msgid "The unique namespace text." msgstr "Уникальный текст пространства имен." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "Максимальный размер аккумулированного кэша в байтах, при превышении которого " "агент очистки кэша, если он запущен, начинает очистку кэша образов." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Имя пространства имен для пользователя. Используется в пользовательском " "интерфейсе." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "Пользователь для идентификации службой Swift (устарело)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "Значение для опции сокета TCP_KEEPIDLE. Это время в секундах, в течение " "которого соединение должно простаивать, прежде чем TCP начнет отправлять " "тесты keepalive." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. 
Error: %(ioe)s" msgstr "" "Ошибка в %(error_key_name)s %(error_filename)s. Проверьте. Ошибка: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Ошибка в %(error_key_name)s %(error_filename)s. Проверьте. Ошибка OpenSSL: " "%(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Неправильная пара ключей. Убедитесь, что сертификат %(cert_file)s и ключ " "%(key_file)sсоответствуют друг другу. Ошибка OpenSSL: %(ce)s" msgid "There was an error configuring the client." msgstr "При настройке клиента произошла ошибка." msgid "There was an error connecting to a server" msgstr "При подключении к серверу произошла ошибка" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Этот файл конфигурации указывает, будут ли \"roles\" или \"policies\" " "использоваться в файле защиты свойств." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Эта операция в настоящее время не разрешена для задач Glance. Они " "автоматически удаляются после достижения срока, указанного в их свойстве " "expires_at." msgid "This operation is currently not permitted on Glance images details." msgstr "" "Эта операция в настоящее время не разрешена для сведений об образах Glance." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Значение определяет, какая стратегия будет использована для определения " "модуля location order образа. На данный момент две стратегии упакованы с " "'location_order' и 'store_type' Glance." 
msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Время (ч) существования задачи после успешного выполнения или завершения с " "ошибкой" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Тайм-аут операций сокета соединений клиента. Если входящее соединение " "простаивает в течение этого времени, оно будет закрыто. Значение '0' " "означает неограниченное ожидание." msgid "Too few arguments." msgstr "Недостаточно аргументов." msgid "Too few locations" msgstr "Слишком мало расположений" msgid "Too many locations" msgstr "Слишком много расположений" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "Общий размер составляет %(size)d байт по %(img_count)d образам" msgid "Turn on/off delayed delete." msgstr "Включить/выключить отложенное удаление." msgid "Type version has to be a valid semver string" msgstr "Версия типа должна быть допустимой строкой semver" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI не может содержать больше одного вхождения схемы. Если вы указали URI " "вида swift://user:pass@http://authurl.com/v1/container/obj, то вам нужно " "изменить его так, чтобы использовалась схема swift+http://, например: swift" "+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "URL для доступа к файлу образа, находящемуся во внешнем хранилище" msgid "Unable to PUT to non-empty collection" msgstr "Не удалось выполнить операцию PUT в непустой набор" #, python-format msgid "" "Unable to create pid file %(pid)s. 
Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Не удается создать файл pid %(pid)s. Запущен без прав доступа root?\n" "Возврат к файлу temp, для завершения работы службы %(service)s:\n" " остановить %(file)s %(server)s - pid-файл %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Не удается отфильтровать с использованием неизвестного оператора: '%s'" msgid "Unable to filter on a range with a non-numeric value." msgstr "Отфильтровать по диапазону с нечисловым значением невозможно." msgid "Unable to filter on a unknown operator." msgstr "Не удается отфильтровать с использованием неизвестного оператора." msgid "Unable to filter using the specified operator." msgstr "Не удается отфильтровать с использованием указанного оператора." msgid "Unable to filter using the specified range." msgstr "Отфильтровать согласно указанному диапазону невозможно." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "'%s' не найден в изменении схемы JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Не удалось найти `op` в изменении схемы JSON. Допускается одно из следующих " "значений: %(available)s." msgid "Unable to get legacy image" msgstr "Невозможно получить устаревший образ" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Не удается увеличить предельное значение для дескриптора файлов. Запущен без " "прав доступа root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Невозможно загрузить %(app_name)s из файла конфигурации %(conf_file)s.\n" "Ошибка: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Не удалось загрузить схему: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." 
msgstr "Не удается найти/вставить файл конфигурации для %s." msgid "Unable to modify collection in immutable or readonly property" msgstr "" "Невозможно изменить набор в неизменяемом или доступном только для чтения " "параметре" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "Не удается получить сертификат с ИД: %s" msgid "Unable to retrieve request id from context" msgstr "Невозможно извлечь ИД запроса из контекста" msgid "Unable to specify artifact type explicitly" msgstr "Невозможно указать тип артефакта явно" msgid "Unable to specify artifact type version explicitly" msgstr "Невозможно указать версию типа артефакта явно" msgid "Unable to specify version if multiple types are possible" msgstr "Невозможно указать версию, если возможно несколько типов" msgid "Unable to specify version if type is not specified" msgstr "Невозможно указать версию, если тип не указан" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "Не удается загрузить данные для дубликата образа %(image_id)s: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "Не удается проверить подпись, так как алгоритм не поддерживается в системе" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "Не удалось проверить подпись: %(reason)s" msgid "Unauthorized image access" msgstr "Нет прав на доступ к образу" msgid "Unexpected body type. Expected list/dict." msgstr "Непредвиденный тип тела. Ожидался список или словарь." 
#, python-format msgid "Unexpected response: %s" msgstr "Непредвиденный ответ: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Неизвестная стратегия идентификации: '%s'" #, python-format msgid "Unknown command: %s" msgstr "Неизвестная команда: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Неизвестное направление сортировки, должно быть 'desc' или 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Нераспознанная версия черновика схемы JSON" msgid "Unrecognized changes-since value" msgstr "Нераспознанное значение изменений за период" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "Неподдерживаемый sort_dir. Допустимые значения: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "Неподдерживаемый sort_key. Допустимые значения: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "" "Значение %(value)d выходит за пределы диапазона, оно не должно превышать " "%(max)d" msgid "Value is greater than maximum" msgstr "Значение больше максимального" msgid "Value is less than minimum" msgstr "Значение меньше минимального" msgid "Value is required" msgstr "Значение является обязательным" #, python-format msgid "Version component is too large (%d max)" msgstr "Компонент версии слишком большой (макс. %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "Недопустимая версия: %(reason)s" msgid "Virtual size of image in bytes" msgstr "Виртуальный размер образа в байтах" msgid "Visibility must be either \"public\" or \"private\"" msgstr "Видимость должна быть \"public\" или \"private\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Система ожидала завершения pid %(pid)s (%(file)s) в течение 15 секунд; " "освобождение" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." 
msgstr "" "Когда равно false, артефакты не могут быть загружены, независимо от " "available_plugins. Когда равно true, артефакты могут быть загружены. " msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "При работе сервера в режиме SSL необходимо указать cert_file и key_file в " "файле конфигурации" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Если эта опция равна true, то она назначает арендатором владельца образа. В " "противном случае владельцем образа будет идентифицированный пользователь, " "отправивший запрос." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "При использовании SSL в соединениях с сервером реестра не требовать проверки " "в сертификатной компании. Это реестровый эквивалент параметра --insecure " "командной строки в glanceclient для API." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Разрешить ли пользователям указывать свойства образа сверх тех, что " "предоставляет схема образа" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "Указывает, включать ли расположения образов в базовой системе в свойства " "образов. 
Например, если используется хранилище в файловой системе, URL " "\"file:///путь/к/образу\" будет возвращаться пользователю в поле метаданных " "direct_url. Раскрытие информации о расположении хранилища может представлять " "угрозу защите, поэтому данный параметр следует использовать с осторожностью! " "Если параметр задан равным true, то он переопределяет show_image_direct_url." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "Включать ли расположение хранилища образов в базовой системе в свойства " "образов. Раскрытие местонахождения хранилища может представлять угрозу " "защите, поэтому будьте осторожны при использовании этого параметра!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Передавать ли заголовки, содержащие информацию о пользователях и " "арендаторах, при отправке запросов к реестру. Позволяет реестру использовать " "промежуточное программное обеспечение контекста без промежуточного " "программного обеспечения auth_token Keystone, удаляя вызовы к службе " "идентификации Keystone. При использовании этой опции рекомендуется " "установить защищенное соединение между api glance и реестром glance при " "помощи промежуточного программного обеспечения, отличного от auth_token." msgid "" "Whether to pass through the user token when making requests to the registry. 
" "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Осуществлять ли сквозную передачу пользовательского маркера при создании " "запросов в реестр Для предотвращения сбоев, связанных с истечением срока " "действия маркера во время передачи больших данных, рекомендуется присваивать " "этому параметру значение False. Если \"use_user_token\" не используется, " "можно указать идентификационные данные администратора." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Рабочий каталог для операций асинхронных задач. Указанный здесь каталог " "будет использоваться для образов (обычно перед их импортом в целевое " "хранилище). Указывая рабочий каталог, убедитесь, что в нем достаточно места " "для эффективного выполнения параллельных задач. В качестве грубой оценки " "можно умножить значение max_workers (или число работающих исполнителей) на " "средний размер образа (например, 500 МБ). 
Оценка размера образа должна " "производиться на основе среднего размера в развертывании. Примечание: в " "зависимости от выполняемых задач может потребоваться умножить это значение " "на некоторый коэффициент, который зависит от того, что задачи делают. " "Например, может потребоваться удвоить доступный размер, если включено " "преобразование образов. Помните, что это лишь приблизительная оценка, ее " "следует выполнять, исходя из наихудшего сценария, и надо быть готовым к " "тому, что ее результаты могут оказаться неверными." #, python-format msgid "Wrong command structure: %s" msgstr "Неверная структура команды: %s" msgid "You are not authenticated." msgstr "Вы не прошли идентификацию." msgid "You are not authorized to complete this action." msgstr "У вас нет прав на выполнение этого действия." #, python-format msgid "You are not authorized to lookup image %s." msgstr "У вас нет прав доступа для поиска образа %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "У вас нет прав доступа для поиска элементов образа %s." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "У вас нет прав доступа для создания тега в пространстве имен, владельцем " "которого является '%s'" msgid "You are not permitted to create image members for the image." msgstr "Вам не разрешено создавать участники образов для данного образа." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Вам не разрешено создавать образы, принадлежащие '%s'." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "Нет прав доступа на создание пространства имен, принадлежащего %s." #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "Нет прав доступа на создание объекта, принадлежащего %s." 
#, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "Нет прав доступа на создание свойства, принадлежащего %s." #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "Нет прав доступа на создание resource_type, принадлежащего %s." #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "Вам не разрешено создавать эту задачу с владельцем: %s" msgid "You are not permitted to deactivate this image." msgstr "Вам не разрешено деактивировать этот образ." msgid "You are not permitted to delete this image." msgstr "Вам не разрешено удалять этот образ." msgid "You are not permitted to delete this meta_resource_type." msgstr "Нет прав доступа на удаление этого meta_resource_type." msgid "You are not permitted to delete this namespace." msgstr "Нет прав доступа на удаление этого пространства имен." msgid "You are not permitted to delete this object." msgstr "Нет прав доступа на удаление этого объекта." msgid "You are not permitted to delete this property." msgstr "Нет прав доступа на удаление этого свойства." msgid "You are not permitted to delete this tag." msgstr "У вас нет прав доступа для удаления этого тега." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "Вам не разрешено изменять '%(attr)s' в этом %(resource)s." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Вам не разрешено изменять '%s' в этом образе." msgid "You are not permitted to modify locations for this image." msgstr "Вам не разрешено изменять расположения этого образа." msgid "You are not permitted to modify tags on this image." msgstr "Вам не разрешено изменять теги этого образа." msgid "You are not permitted to modify this image." msgstr "Вам не разрешено изменять этот образ." msgid "You are not permitted to reactivate this image." msgstr "Вам не разрешено повторно активировать этот образ." 
msgid "You are not permitted to set status on this task." msgstr "Вам не разрешено указывать состояние этой задачи." msgid "You are not permitted to update this namespace." msgstr "Нет прав доступа на обновление этого пространства имен." msgid "You are not permitted to update this object." msgstr "Нет прав доступа на обновление этого объекта." msgid "You are not permitted to update this property." msgstr "Нет прав доступа на обновление этого свойства." msgid "You are not permitted to update this tag." msgstr "У вас нет прав доступа для изменения этого тега." msgid "You are not permitted to upload data for this image." msgstr "Вам не разрешено загружать данные для этого образа." #, python-format msgid "You cannot add image member for %s" msgstr "Невозможно добавить участник образа для %s" #, python-format msgid "You cannot delete image member for %s" msgstr "Невозможно удалить участник образа для %s" #, python-format msgid "You cannot get image member for %s" msgstr "Невозможно получить участник образа для %s" #, python-format msgid "You cannot update image member %s" msgstr "Невозможно обновить участник образа %s" msgid "You do not own this image" msgstr "Этот образ вам не принадлежит" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Вы выбрали применение SSL в соединении и предоставили сертификат, однако вам " "не удалось ни предоставить параметр key_file, ни задать переменную среды " "GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Вы выбрали применение SSL в соединении и предоставили ключ, однако вам не " "удалось ни предоставить параметр cert_file, ни задать переменную среды " "GLANCE_CLIENT_CERT_FILE" msgid 
"" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "В __init__() получен непредвиденный именованный аргумент '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "не удается выполнить переход от %(current)s к %(next)s при обновлении " "(требуется from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "настраиваемые свойства (%(props)s) конфликтуют с базовыми свойствами" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Для этой платформы отсутствуют центры обработки событий poll и selects " "библиотеки eventlet" msgid "is_public must be None, True, or False" msgstr "Параметр is_public должен быть равен None, True или False" msgid "limit param must be an integer" msgstr "Параметр limit должен быть целым числом" msgid "limit param must be positive" msgstr "Параметр limit должен быть положительным" #, python-format msgid "location: %s data lost" msgstr "Расположение: данные %s утеряны" msgid "md5 hash of image contents." msgstr "Хэш md5 содержимого образа." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "В new_image() получены непредвиденные ключевые слова %s" msgid "protected must be True, or False" msgstr "Параметр protected должен быть равен True или False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "не удается запустить %(serv)s. 
Ошибка: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "Слишком большая длина x-openstack-request-id, максимальная длина: %s" glance-12.0.0/glance/locale/glance-log-warning.pot0000664000567000056710000001712612701407047023064 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the glance project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev23\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-09 06:18+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: glance/scrubber.py:312 #, python-format msgid "" "One or more image locations couldn't be scrubbed from backend. Leaving " "image '%s' in 'pending_delete' status" msgstr "" #: glance/api/common.py:167 #, python-format msgid "" "User %(user)s attempted to upload an image of unknown size that will " "exceed the quota. %(remaining)d bytes remaining." msgstr "" #: glance/api/common.py:176 #, python-format msgid "" "User %(user)s attempted to upload an image of size %(size)d that will " "exceed the quota. %(remaining)d bytes remaining." msgstr "" #: glance/api/v1/images.py:756 #, python-format msgid "" "Failed to activate image %s in registry. About to delete image bits from " "store and update status to 'killed'." msgstr "" #: glance/api/v2/images.py:1002 #, python-format msgid "" "Could not find schema properties file %s. Continuing without custom " "properties" msgstr "" #: glance/api/v2/tasks.py:81 #, python-format msgid "Forbidden to create task. Reason: %(reason)s" msgstr "" #: glance/api/v2/tasks.py:121 #, python-format msgid "Failed to find task %(task_id)s. 
Reason: %(reason)s" msgstr "" #: glance/api/v2/tasks.py:127 #, python-format msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" msgstr "" #: glance/api/v3/router.py:44 msgid "" "/v3 controller is deprecated and will be removed from glance-api soon. " "Remove the reference to it from glance-api-paste.ini configuration file " "and use Glance Artifact Service API instead" msgstr "" #: glance/async/utils.py:65 #, python-format msgid "An optional task has failed, the failure was: %s" msgstr "" #: glance/async/flows/convert.py:64 #, python-format msgid "" "The conversion format is None, please add a value for it in the config " "file for this task to work: %s" msgstr "" #: glance/cmd/replicator.py:608 #, python-format msgid "" "%(image_id)s: field %(key)s differs (source is %(master_value)s, " "destination is %(slave_value)s)" msgstr "" #: glance/cmd/replicator.py:621 #, python-format msgid "Image %s entirely missing from the destination" msgstr "" #: glance/common/store_utils.py:55 #, python-format msgid "Failed to delete image %s in store from URI" msgstr "" #: glance/common/wsgi.py:340 #, python-format msgid "Unrecognised child %s" msgstr "" #: glance/common/glare/loader.py:120 #, python-format msgid "Can't load artifact %s: load disabled in config" msgstr "" #: glance/common/glare/loader.py:146 #, python-format msgid "Can't load artifact %s: not in available_plugins list" msgstr "" #: glance/common/scripts/image_import/main.py:153 #, python-format msgid "Task %(task_id)s failed with exception %(error)s" msgstr "" #: glance/db/simple/api.py:407 #, python-format msgid "Could not find image %s" msgstr "" #: glance/db/simple/api.py:412 msgid "Unable to get deleted image" msgstr "" #: glance/db/simple/api.py:416 msgid "Unable to get unowned image" msgstr "" #: glance/db/simple/api.py:930 #, python-format msgid "Could not find task %s" msgstr "" #: glance/db/simple/api.py:935 #, python-format msgid "Unable to get deleted task %s" msgstr "" #: 
glance/db/simple/api.py:1093 #, python-format msgid "Could not find task info %s" msgstr "" #: glance/db/sqlalchemy/api.py:76 msgid "Deadlock detected. Retrying..." msgstr "" #: glance/db/sqlalchemy/api.py:116 msgid "Attempted to modify image user did not own." msgstr "" #: glance/db/sqlalchemy/api.py:350 msgid "Id not in sort_keys; is sort_keys unique?" msgstr "" #: glance/db/sqlalchemy/api.py:1286 #, python-format msgid "Expected table %(tbl)s was not found in DB." msgstr "" #: glance/db/sqlalchemy/glare.py:134 msgid "Artifact with the specified type, name and version already exists" msgstr "" #: glance/db/sqlalchemy/glare.py:240 #, python-format msgid "Artifact with id=%s not found" msgstr "" #: glance/db/sqlalchemy/glare.py:243 #, python-format msgid "Artifact with id=%s is not accessible" msgstr "" #: glance/db/sqlalchemy/glare.py:445 #, python-format msgid "Show level %s is not supported in this operation" msgstr "" #: glance/db/sqlalchemy/glare.py:686 #, python-format msgid "" "Artifact with the specified type, name and versions already has the " "direct dependency=%s" msgstr "" #: glance/db/sqlalchemy/glare.py:716 #, python-format msgid "" "Artifact with the specified type, name and version already has the direct" " dependency=%d" msgstr "" #: glance/db/sqlalchemy/metadata.py:352 glance/db/sqlalchemy/metadata.py:360 #: glance/db/sqlalchemy/metadata.py:369 #, python-format msgid "Duplicate entry for values: %s" msgstr "" #: glance/db/sqlalchemy/metadef_api/tag.py:36 #, python-format msgid "Metadata tag not found for id %s" msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:86 #, python-format msgid "Failed to decrypt location value for image %(image_id)s" msgstr "" #: glance/domain/__init__.py:496 msgid "The `eventlet` executor has been deprecated. Use `taskflow` instead." 
msgstr "" #: glance/glare/location.py:122 #, python-format msgid "Failed to delete blob %s in store from URI" msgstr "" #: glance/image_cache/__init__.py:71 #, python-format msgid "" "Image cache driver '%(driver_name)s' failed to load. Got error: " "'%(import_err)s." msgstr "" #: glance/image_cache/__init__.py:92 #, python-format msgid "" "Image cache driver '%(driver_module)s' failed to configure. Got error: " "'%(config_err)s" msgstr "" #: glance/image_cache/prefetcher.py:46 #, python-format msgid "Image '%s' is not active. Not caching." msgstr "" #: glance/image_cache/prefetcher.py:51 #, python-format msgid "No metadata found for image '%s'" msgstr "" #: glance/image_cache/prefetcher.py:79 msgid "Failed to successfully cache all images in queue." msgstr "" #: glance/image_cache/drivers/sqlite.py:328 #, python-format msgid "" "Fetch of cache file failed (%(e)s), rolling back by moving " "'%(incomplete_path)s' to '%(invalid_path)s'" msgstr "" #: glance/image_cache/drivers/sqlite.py:451 #, python-format msgid "Failed to delete file %(path)s. Got error: %(e)s" msgstr "" #: glance/image_cache/drivers/sqlite.py:488 #: glance/image_cache/drivers/xattr.py:439 #, python-format msgid "Cached image file '%s' doesn't exist, unable to delete" msgstr "" #: glance/registry/api/v1/images.py:124 #, python-format msgid "Invalid marker. Image %(id)s could not be found." 
msgstr "" #: glance/registry/api/v1/images.py:129 glance/registry/api/v1/members.py:74 #: glance/registry/api/v1/members.py:108 glance/registry/api/v1/members.py:228 #: glance/registry/api/v1/members.py:292 #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "" #: glance/registry/api/v1/members.py:115 glance/registry/api/v1/members.py:235 #: glance/registry/api/v1/members.py:299 #, python-format msgid "User lacks permission to share image %(id)s" msgstr "" #: glance/registry/api/v1/members.py:126 glance/registry/api/v1/members.py:143 #: glance/registry/api/v1/members.py:248 #, python-format msgid "Invalid membership association specified for image %(id)s" msgstr "" #: glance/registry/api/v1/members.py:336 #, python-format msgid "Member %(id)s not found" msgstr "" glance-12.0.0/glance/locale/en_GB/0000775000567000056710000000000012701407204017623 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/en_GB/LC_MESSAGES/0000775000567000056710000000000012701407204021410 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/en_GB/LC_MESSAGES/glance-log-info.po0000664000567000056710000001333312701407047024721 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andi Chandler , 2014 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:52+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-06-23 01:20+0000\n" "Last-Translator: openstackjenkins \n" "Language: en-GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: English (United Kingdom)\n" msgid "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." msgstr "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "Access denied to image %(id)s but returning 'not found'" msgid "All workers have terminated. Exiting" msgstr "All workers have terminated. Exiting" #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgid "Caught keyboard interrupt. Exiting." msgstr "Caught keyboard interrupt. Exiting." #, python-format msgid "Child %d exiting normally" msgstr "Child %d exiting normally" #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "Cleaning up %s after exceeding the quota" #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "Cleaning up %s after exceeding the quota." #, python-format msgid "Considering: %s" msgstr "Considering: %s" msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "Daemon Shutdown on KeyboardInterrupt" msgid "Defaulting to SQLite driver." msgstr "Defaulting to SQLite driver." 
#, python-format msgid "Delete denied for public image %(id)s" msgstr "Delete denied for public image %(id)s" #, python-format msgid "Image %(id)s not found" msgstr "Image %(id)s not found" #, python-format msgid "Image %s is being synced" msgstr "Image %s is being synced" #, python-format msgid "Image %s metadata has changed" msgstr "Image %s metadata has changed" #, python-format msgid "Image cache loaded driver '%s'." msgstr "Image cache loaded driver '%s'." msgid "Initialized gzip middleware" msgstr "Initialised gzip middleware" msgid "Initialized image cache management middleware" msgstr "Initialised image cache management middleware" msgid "Initialized image cache middleware" msgstr "Initialised image cache middleware" #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "Initializing scrubber with configuration: %s" #, python-format msgid "Not queueing image '%s'. Already being written to cache" msgstr "Not queueing image '%s'. Already being written to cache" #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "Not queueing image '%s'. Already cached." #, python-format msgid "Not queueing image '%s'. Already queued." msgstr "Not queueing image '%s'. Already queued." #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "Reaped %(reaped)s %(entry_type)s cache entries" #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "Rejecting image creation request for invalid image id '%(bad_id)s'" #, python-format msgid "Removed invalid cache file %s" msgstr "Removed invalid cache file %s" #, python-format msgid "Removed stalled cache file %s" msgstr "Removed stalled cache file %s" #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "Returning %(funcname)s: %(output)s" #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "Scrubbing image %(id)s from %(count)d locations." 
#, python-format msgid "Started child %s" msgstr "Started child %s" #, python-format msgid "Starting %d workers" msgstr "Starting %d workers" #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgid "Starting single process server" msgstr "Starting single process server" #, python-format msgid "Storing: %s" msgstr "Storing: %s" #, python-format msgid "Successfully cached all %d images" msgstr "Successfully cached all %d images" #, python-format msgid "Successfully created image %(id)s" msgstr "Successfully created image %(id)s" #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "Successfully deleted a membership from image %(id)s" #, python-format msgid "Successfully deleted image %(id)s" msgstr "Successfully deleted image %(id)s" #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "Successfully updated a membership for image %(id)s" #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "Successfully updated memberships for image %(id)s" msgid "Triggering asynchronous copy from external source" msgstr "Triggering asynchronous copy from external source" msgid "Unable to get deleted image" msgstr "Unable to get deleted image" #, python-format msgid "Update denied for public image %(id)s" msgstr "Update denied for public image %(id)s" #, python-format msgid "Updating metadata for image %(id)s" msgstr "Updating metadata for image %(id)s" #, python-format msgid "creating table %(table)s" msgstr "creating table %(table)s" #, python-format msgid "dropping table %(table)s" msgstr "dropping table %(table)s" glance-12.0.0/glance/locale/glance-log-info.pot0000664000567000056710000002171512701407047022351 0ustar jenkinsjenkins00000000000000# Translations template for glance. 
# Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the glance project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 06:24+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: glance/location.py:406 #, python-format msgid "Successfully verified signature for image %s" msgstr "" #: glance/scrubber.py:211 #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "" #: glance/scrubber.py:226 msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "" #: glance/scrubber.py:238 #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "" #: glance/scrubber.py:296 #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "" #: glance/scrubber.py:310 #, python-format msgid "Image %s has been scrubbed successfully" msgstr "" #: glance/scrubber.py:324 #, python-format msgid "" "Image location for image '%s' not found in backend; Marking image " "location deleted in db." msgstr "" #: glance/scrubber.py:333 #, python-format msgid "Image %s is scrubbed from a location." msgstr "" #: glance/api/middleware/cache.py:58 msgid "Initialized image cache middleware" msgstr "" #: glance/api/middleware/cache_manage.py:73 msgid "Initialized image cache management middleware" msgstr "" #: glance/api/middleware/gzip.py:35 msgid "Initialized gzip middleware" msgstr "" #: glance/api/v1/images.py:734 #, python-format msgid "Uploaded data of image %s from request payload successfully." 
msgstr "" #: glance/api/v1/images.py:799 msgid "Triggering asynchronous copy from external source" msgstr "" #: glance/api/v1/upload_utils.py:123 #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "" #: glance/api/v2/image_actions.py:49 #, python-format msgid "Image %s is deactivated" msgstr "" #: glance/api/v2/image_actions.py:65 #, python-format msgid "Image %s is reactivated" msgstr "" #: glance/api/v2/image_data.py:109 #, python-format msgid "Unable to create trust: %s Use the existing user token." msgstr "" #: glance/api/v2/image_data.py:132 #, python-format msgid "Unable to delete trust %(trust)s: %(msg)s" msgstr "" #: glance/async/flows/base_import.py:374 #, python-format msgid "%(task_id)s of %(task_type)s completed" msgstr "" #: glance/cmd/replicator.py:371 #, python-format msgid "Storing: %s" msgstr "" #: glance/cmd/replicator.py:448 #, python-format msgid "Considering: %s" msgstr "" #: glance/cmd/replicator.py:474 glance/cmd/replicator.py:548 #, python-format msgid "Image %s metadata has changed" msgstr "" #: glance/cmd/replicator.py:554 #, python-format msgid "Image %s is being synced" msgstr "" #: glance/common/wsgi.py:310 #, python-format msgid "Starting %d workers" msgstr "" #: glance/common/wsgi.py:323 #, python-format msgid "Removed dead child %s" msgstr "" #: glance/common/wsgi.py:326 #, python-format msgid "Removed stale child %s" msgstr "" #: glance/common/wsgi.py:338 msgid "All workers have terminated. Exiting" msgstr "" #: glance/common/wsgi.py:355 msgid "Caught keyboard interrupt. Exiting." 
msgstr "" #: glance/common/wsgi.py:435 #, python-format msgid "Child %d exiting normally" msgstr "" #: glance/common/wsgi.py:440 #, python-format msgid "Started child %s" msgstr "" #: glance/common/wsgi.py:470 msgid "Starting single process server" msgstr "" #: glance/common/artifacts/loader.py:126 glance/common/artifacts/loader.py:150 #, python-format msgid "Artifact %s has been successfully loaded" msgstr "" #: glance/common/scripts/__init__.py:30 #, python-format msgid "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" msgstr "" #: glance/common/scripts/image_import/main.py:36 #, python-format msgid "Task %(task_id)s beginning import execution." msgstr "" #: glance/common/scripts/image_import/main.py:147 #, python-format msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" msgstr "" #: glance/common/scripts/image_import/main.py:156 #, python-format msgid "Task %(task_id)s: Could not import image file %(image_data)s" msgstr "" #: glance/db/simple/api.py:60 #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "" #: glance/db/simple/api.py:66 #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "" #: glance/db/simple/api.py:1987 #, python-format msgid "Could not find artifact %s" msgstr "" #: glance/db/simple/api.py:1991 msgid "Unable to get deleted image" msgstr "" #: glance/db/sqlalchemy/api.py:1275 #, python-format msgid "Purging deleted rows older than %(age_in_days)d day(s) from table %(tbl)s" msgstr "" #: glance/db/sqlalchemy/api.py:1290 #, python-format msgid "Deleted %(rows)d row(s) from table %(tbl)s" msgstr "" #: glance/db/sqlalchemy/metadata.py:161 #, python-format msgid "Table %s has been cleared" msgstr "" #: glance/db/sqlalchemy/metadata.py:235 #, python-format msgid "Overwriting namespace %s" msgstr "" #: glance/db/sqlalchemy/metadata.py:251 #, python-format msgid "Skipping namespace %s. It already exists in the database." 
msgstr "" #: glance/db/sqlalchemy/metadata.py:342 #, python-format msgid "File %s loaded to database." msgstr "" #: glance/db/sqlalchemy/metadata.py:344 msgid "Metadata loading finished" msgstr "" #: glance/db/sqlalchemy/metadata.py:452 #, python-format msgid "Namespace %(namespace)s saved in %(file)s" msgstr "" #: glance/db/sqlalchemy/migrate_repo/schema.py:100 #, python-format msgid "creating table %(table)s" msgstr "" #: glance/db/sqlalchemy/migrate_repo/schema.py:106 #, python-format msgid "dropping table %(table)s" msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:66 msgid "" "'metadata_encryption_key' was not specified in the config file or a " "config file was not specified. This means that this migration is a NOOP." msgstr "" #: glance/domain/__init__.py:400 #, python-format msgid "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" msgstr "" #: glance/image_cache/__init__.py:68 #, python-format msgid "Image cache loaded driver '%s'." msgstr "" #: glance/image_cache/__init__.py:77 glance/image_cache/__init__.py:96 msgid "Defaulting to SQLite driver." msgstr "" #: glance/image_cache/prefetcher.py:83 #, python-format msgid "Successfully cached all %d images" msgstr "" #: glance/image_cache/drivers/sqlite.py:409 #: glance/image_cache/drivers/xattr.py:338 #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "" #: glance/image_cache/drivers/sqlite.py:413 #: glance/image_cache/drivers/xattr.py:342 #, python-format msgid "Not queueing image '%s'. Already being written to cache" msgstr "" #: glance/image_cache/drivers/sqlite.py:418 #: glance/image_cache/drivers/xattr.py:347 #, python-format msgid "Not queueing image '%s'. Already queued." 
msgstr "" #: glance/image_cache/drivers/sqlite.py:435 #, python-format msgid "Removed invalid cache file %s" msgstr "" #: glance/image_cache/drivers/sqlite.py:449 #, python-format msgid "Removed stalled cache file %s" msgstr "" #: glance/image_cache/drivers/xattr.py:392 #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "" #: glance/quota/__init__.py:332 #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "" #: glance/registry/api/v1/images.py:342 glance/registry/api/v1/images.py:380 #: glance/registry/api/v1/images.py:481 #, python-format msgid "Image %(id)s not found" msgstr "" #: glance/registry/api/v1/images.py:347 glance/registry/api/v1/images.py:376 #: glance/registry/api/v1/images.py:491 #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "" #: glance/registry/api/v1/images.py:368 #, python-format msgid "Successfully deleted image %(id)s" msgstr "" #: glance/registry/api/v1/images.py:371 #, python-format msgid "Delete denied for public image %(id)s" msgstr "" #: glance/registry/api/v1/images.py:408 #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "" #: glance/registry/api/v1/images.py:420 #, python-format msgid "Successfully created image %(id)s" msgstr "" #: glance/registry/api/v1/images.py:473 #, python-format msgid "Updating metadata for image %(id)s" msgstr "" #: glance/registry/api/v1/images.py:486 #, python-format msgid "Update denied for public image %(id)s" msgstr "" #: glance/registry/api/v1/members.py:196 #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "" #: glance/registry/api/v1/members.py:269 #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "" #: glance/registry/api/v1/members.py:316 #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "" 
glance-12.0.0/glance/locale/tr_TR/0000775000567000056710000000000012701407204017703 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/tr_TR/LC_MESSAGES/0000775000567000056710000000000012701407204021470 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/tr_TR/LC_MESSAGES/glance-log-warning.po0000664000567000056710000002005212701407047025507 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-01 07:46+0000\n" "Last-Translator: İşbaran Akçayır \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "" "%(image_id)s: field %(key)s differs (source is %(master_value)s, destination " "is %(slave_value)s)" msgstr "" "%(image_id)s: %(key)s alanı (kaynak; %(master_value)s, hedef; " "%(slave_value)s) değerlerinden farklıdır" #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "%(id)s imajına 'not found' değeri geri döndürülerek erişim engellendi" #, python-format msgid "An optional task has failed, the failure was: %s" msgstr "İsteğe bağlı bir görev başarısız oldu, nedeni: %s" #, python-format msgid "Artifact with id=%s is not accessible" msgstr "id=%s kimlikli ürün erişilebilir değil" #, python-format msgid "Artifact with id=%s not found" msgstr "id=%s kimlikli ürün bulunamadı" msgid "Artifact with the specified type, name and version already exists" msgstr "Belirtilen tür, ad ve sürümde ürün zaten var" #, python-format msgid "" "Artifact with 
the specified type, name and version already has the direct " "dependency=%d" msgstr "" "Belirtilen tür, ad ve sürümdeki ürün zaten doğrudan şu bağımlılığa sahip=%d" #, python-format msgid "" "Artifact with the specified type, name and versions already has the direct " "dependency=%s" msgstr "" "Belirtilen tür, ad ve sürümdeki ürün zaten doğrudan şu bağımlılığa sahip=%s" msgid "Attempted to modify image user did not own." msgstr "Sahibi olunmayan imaj kullanıcısının değiştirilmesi denendi." #, python-format msgid "Cached image file '%s' doesn't exist, unable to delete" msgstr "Önbelleğe alınmış imaj dosyası '%s' mevcut değil, silinemedi" #, python-format msgid "Can't load artifact %s: load disabled in config" msgstr "%s ürünü yüklenemiyor: yapılandırmadaki yükleme devre dışı" #, python-format msgid "Can't load artifact %s: not in available_plugins list" msgstr "%s ürünü yüklenemiyor: available_plugin listesinde yok" #, python-format msgid "Could not find image %s" msgstr "%s imajı bulunamadı" #, python-format msgid "" "Could not find schema properties file %s. Continuing without custom " "properties" msgstr "" "Şema özellik dosyası %s bulunamadı. Özel özellikler olmadan devam ediliyor" #, python-format msgid "Could not find task %s" msgstr "%s görevi bulunamadı" #, python-format msgid "Could not find task info %s" msgstr "%s görev bilgisi bulunamadı" msgid "Deadlock detected. Retrying..." msgstr "Ölümcül kilitlenme tespit edildi. Tekrar deneniyor..." #, python-format msgid "Duplicate entry for values: %s" msgstr "Değerler için çift girdi: %s" #, python-format msgid "" "Failed to activate image %s in registry. About to delete image bits from " "store and update status to 'killed'." msgstr "" "Kayıt defterinde %s imajı etkinleştirilemedi. Depodan imaj bitlerini silmek " "ve durumunu 'killed' olarak güncellemek üzeresiniz." 
#, python-format msgid "Failed to decrypt location value for image %(image_id)s" msgstr "%(image_id)s imajı için konum değeri şifresi çözülemedi" #, python-format msgid "Failed to delete blob %s in store from URI" msgstr "Depodaki URI'den %s blobu silme başarısız oldu" #, python-format msgid "Failed to delete file %(path)s. Got error: %(e)s" msgstr "%(path)s dosyası silme işlemi başarısız oldu. Şu hata alındı: %(e)s" #, python-format msgid "Failed to delete image %s in store from URI" msgstr "Depodaki URI'den %s imajını silme başarısız oldu" #, python-format msgid "Failed to find task %(task_id)s. Reason: %(reason)s" msgstr "%(task_id)s görev bulma başarısız oldu. Nedeni: %(reason)s" msgid "Failed to successfully cache all images in queue." msgstr "Kuyruktaki bütün imajlar başarılı bir şekilde önbelleğe alınamadı." #, python-format msgid "" "Fetch of cache file failed (%(e)s), rolling back by moving " "'%(incomplete_path)s' to '%(invalid_path)s'" msgstr "" "Önbellek dosyasının getirilmesi işlemi başarısız oldu (%(e)s), " "'%(incomplete_path)s' yolundan '%(invalid_path)s' yoluna taşınarak geri " "alınıyor" #, python-format msgid "Forbidden to create task. Reason: %(reason)s" msgstr "Görev oluşturmak yasak. Nedeni: %(reason)s" #, python-format msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" msgstr "%(task_id)s görevini almak yasak. Nedeni: %(reason)s" msgid "Id not in sort_keys; is sort_keys unique?" msgstr "Id sort_keys değil; sort_keys eşsiz mi?" #, python-format msgid "Image %s entirely missing from the destination" msgstr "%s imajı hedefte tamamen eksik" #, python-format msgid "Image '%s' is not active. Not caching." msgstr "'%s' imajı etkin değil. Önbelleğe alınamıyor." #, python-format msgid "" "Image cache driver '%(driver_module)s' failed to configure. Got error: " "'%(config_err)s" msgstr "" "İmaj önbellek sürücüsü '%(driver_module)s' yapılandırılamadı. 
Alınan hata: " "'%(config_err)s" #, python-format msgid "" "Image cache driver '%(driver_name)s' failed to load. Got error: " "'%(import_err)s." msgstr "" "İmaj önbellek sürücüsü '%(driver_name)s' yüklenemedi. Alınan hata: " "'%(import_err)s." #, python-format msgid "Invalid marker. Image %(id)s could not be found." msgstr "Geçersiz işaretçi. İmaj %(id)s bulunamadı." #, python-format msgid "Invalid membership association specified for image %(id)s" msgstr "%(id)s imajı için belirtilen geçersiz üye ilişkisi" #, python-format msgid "Member %(id)s not found" msgstr "Üye %(id)s bulunamadı" #, python-format msgid "Metadata tag not found for id %s" msgstr "%s için üst veri etiketi bulunamadı" #, python-format msgid "No metadata found for image '%s'" msgstr "'%s' imajı için hiçbir üst veri bulunamadı" #, python-format msgid "Show level %s is not supported in this operation" msgstr "Bu işlemde desteklenmeyen %s seviyesini göster" #, python-format msgid "Task %(task_id)s failed with exception %(error)s" msgstr "%(task_id)s görevi %(error)s olağandışı durumu ile başarısız oldu" msgid "The `eventlet` executor has been deprecated. Use `taskflow` instead." msgstr "`eventlet` yürütücüsü önerilmiyor. Bunun yerine `taskflow` kullan." #, python-format msgid "" "The conversion format is None, please add a value for it in the config file " "for this task to work: %s" msgstr "" "Dönüştürme biçimi Hiçbiri, lütfen çalıştırılacak bu görev için yapılandırma " "dosyasında dönüştürme biçimi değeri ekleyin: %s" msgid "Unable to get deleted image" msgstr "Silinen imaj alınamadı" #, python-format msgid "Unable to get deleted task %s" msgstr "Silinen görev %s alınamadı" msgid "Unable to get unowned image" msgstr "Sahipsiz imaj alınamadı" #, python-format msgid "Unrecognised child %s" msgstr "Tanınmayan alt %s" #, python-format msgid "" "User %(user)s attempted to upload an image of size %(size)d that will exceed " "the quota. %(remaining)d bytes remaining." 
msgstr "" "%(user)s kullanıcısı kotayı aşacak %(size)d boyutunda bir imajı yükleme " "girişiminde bulundu. Kalan; %(remaining)d bayt." #, python-format msgid "" "User %(user)s attempted to upload an image of unknown size that will exceed " "the quota. %(remaining)d bytes remaining." msgstr "" "%(user)s kullanıcısı kotayı aşacak bilinmeyen boyutta bir imajı yükleme " "girişiminde bulundu. Kalan; %(remaining)d bayt." #, python-format msgid "User lacks permission to share image %(id)s" msgstr "%(id)s imajını paylaşmak için eksik kullanıcı izni" glance-12.0.0/glance/locale/tr_TR/LC_MESSAGES/glance-log-info.po0000664000567000056710000002063412701407047025003 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:52+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-01 07:45+0000\n" "Last-Translator: İşbaran Akçayır \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "%(task_id)s of %(task_type)s completed" msgstr "%(task_id)s / %(task_type)s tamamlandı" msgid "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." msgstr "" "'metadata_encryption_key' yapılandırma dosyasında belirtilmemiş ya da " "yapılandırma dosyası belirtilmemiş. Bu, bu taşımanın NOOP olduğu anlamına " "gelir." 
#, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "%(id)s imajına erişim engellendi ancak 'not found' geri döndürülüyor" msgid "All workers have terminated. Exiting" msgstr "Bütün işçiler sonlandırıldı. Çıkılıyor" #, python-format msgid "Artifact %s has been successfully loaded" msgstr "Ürün %s başarılı bir şekilde yüklendi" #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "%(funcname)s: args=%(args)s, kwargs=%(kwargs)s çağrılıyor" msgid "Caught keyboard interrupt. Exiting." msgstr "Klavye kesme isteği yakalandı. Çıkılıyor." #, python-format msgid "Child %d exiting normally" msgstr "Alt %d'den normal bir şekilde çıkılıyor" #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "Kotayı aştıktan sonra %s temizleniyor" #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "Kota aşıldıktan sonra %s temizleniyor." #, python-format msgid "Considering: %s" msgstr "Değerlendiriliyor: %s" #, python-format msgid "Could not find artifact %s" msgstr "%s ürünü bulunamadı" msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "KeyboardInterrupt üzerinde Artalan İşlem Kapat" msgid "Defaulting to SQLite driver." msgstr "SQLite sürücüsü varsayılıyor." #, python-format msgid "Delete denied for public image %(id)s" msgstr "%(id)s ortak imajı için silme işlemi engellendi" #, python-format msgid "File %s loaded to database." msgstr "%s dosyası veritabanına yüklendi." #, python-format msgid "Image %(id)s not found" msgstr "%(id)s imajı bulunamadı" #, python-format msgid "Image %s is being synced" msgstr "%s imajı eşzamanlandırılıyor" #, python-format msgid "Image %s is deactivated" msgstr "%s imajı devre dışı bırakıldı" #, python-format msgid "Image %s is reactivated" msgstr "%s imajı yeniden etkinleştirildi" #, python-format msgid "Image %s metadata has changed" msgstr "%s imajı üst verisi değiştirildi" #, python-format msgid "Image cache loaded driver '%s'." 
msgstr "İmaj önbelleği '%s' sürücüsünü yükledi." msgid "Initialized gzip middleware" msgstr "Gzip katmanı başlatıldı" msgid "Initialized image cache management middleware" msgstr "İmaj önbellek yönetim katmanı başlatıldı" msgid "Initialized image cache middleware" msgstr "İmaj önbellek katmanı başlatıldı" #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "Temizleyici yapılandırma ile başlatılıyor: %s" #, python-format msgid "" "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" msgstr "" "%(task_type)s türünün %(task_id)s görev_numarası için bilinen görev " "betikleri yükleniyor" msgid "Metadata loading finished" msgstr "Üst veri yüklemesi bitti" #, python-format msgid "Namespace %(namespace)s saved in %(file)s" msgstr "%(namespace)s ad alanı %(file)s içinde kaydedildi" #, python-format msgid "Not queueing image '%s'. Already being written to cache" msgstr "'%s' imajı kuyruğa alınmıyor. Zaten önbelleğe yazıldı" #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "'%s' imajı kuyruğa alınmıyor. Zaten önbelleğe alındı." #, python-format msgid "Not queueing image '%s'. Already queued." msgstr "'%s' imajı kuyruğa alınmıyor. Zaten kuyruğa alınmıştı." 
#, python-format msgid "Overwriting namespace %s" msgstr "%s ad alanı üzerine yazılıyor" #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "%(reaped)s %(entry_type)s önbellek girdileri kaldırıldı" #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "" "Geçersiz imaj numarası '%(bad_id)s' için imaj oluşturma isteği reddediliyor" #, python-format msgid "Removed dead child %s" msgstr "Ölü alt %s kaldırıldı" #, python-format msgid "Removed invalid cache file %s" msgstr "Geçersiz ön bellek dosyası %s silindi" #, python-format msgid "Removed stale child %s" msgstr "Bozuk alt %s kaldırıldı" #, python-format msgid "Removed stalled cache file %s" msgstr "Bozuk önbellek dosyası %s silindi" #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "%(funcname)s geri döndürülüyor: %(output)s" #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "%(count)d konumundan %(id)s imajı temizleniyor." #, python-format msgid "Skipping namespace %s. It already exists in the database." msgstr "%s ad alanı geçiliyor. Bu ad alanı veritabanında zaten var." 
#, python-format msgid "Starting %d workers" msgstr "%d işçi başlatılıyor" #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "" "Artalan İşlemleri Başlatılıyor: uyanma_zamanı=%(wakeup_time)s iş " "parçacıkları=%(threads)s" msgid "Starting single process server" msgstr "Tek işlem sunucusu başlatılıyor" #, python-format msgid "Storing: %s" msgstr "Depolanıyor: %s" #, python-format msgid "Successfully cached all %d images" msgstr "Bütün %d imajları başarılı bir şekilde önbelleğe alındı" #, python-format msgid "Successfully created image %(id)s" msgstr "%(id)s imajı başarılı bir şekilde oluşturuldu" #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "%(id)s imajından bir üye başarılı bir şekilde silindi" #, python-format msgid "Successfully deleted image %(id)s" msgstr "%(id)s imajı başarılı bir şekilde silindi" #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "%(id)s imajı için üyeler başarılı bir şekilde güncellendi" #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "%(id)s imajı için üyeler başarılı bir şekilde güncellendi" #, python-format msgid "Table %s has been cleared" msgstr "%s tablosu temizlendi" #, python-format msgid "Task %(task_id)s beginning import execution." msgstr "İçeri aktarma işlemi başlatan görev %(task_id)s." 
#, python-format msgid "Task %(task_id)s: Could not import image file %(image_data)s" msgstr "%(task_id)s Görev: %(image_data)s imaj dosyası içeri aktarılamadı" #, python-format msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" msgstr "" "%(task_id)s Görev: İçeri aktarılacak imaj veri uri'si %(data_uri)s alındı" #, python-format msgid "" "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" msgstr "" "[%(task_id)s] görev durumu %(cur_status)s durumundan %(new_status)s yeni " "duruma değiştiriliyor" msgid "Triggering asynchronous copy from external source" msgstr "Harici kaynaktan eşzamansız kopyalama tetikleniyor" msgid "Unable to get deleted image" msgstr "Silinen imaj alınamadı" #, python-format msgid "Update denied for public image %(id)s" msgstr "%(id)s ortak imajı için güncelleme engellendi" #, python-format msgid "Updating metadata for image %(id)s" msgstr "%(id)s imajı için üst veri güncelleniyor" #, python-format msgid "Uploaded data of image %s from request payload successfully." msgstr "İstek yükünden %s imaj verileri başarılı bir şekilde yüklendi." #, python-format msgid "creating table %(table)s" msgstr "%(table)s tablosu oluşturuluyor" #, python-format msgid "dropping table %(table)s" msgstr "%(table)s tablosu siliniyor" glance-12.0.0/glance/locale/tr_TR/LC_MESSAGES/glance-log-error.po0000664000567000056710000002456412701407047025207 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev23\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-08 22:12+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-17 02:48+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "An error occurred during image.send notification: %(err)s" msgstr "image.send bildirimi sırasında bir hata oluştu: %(err)s" #, python-format msgid "" "An error occurred reading from backend storage for image %(image_id)s: " "%(err)s" msgstr "" "%(image_id)s imajı, artalanda çalışan depolamadan okurken bir hata meydana " "geldi: %(err)s" #, python-format msgid "" "Backend storage for image %(image_id)s disconnected after writing only " "%(bytes_written)d bytes" msgstr "" "%(image_id)s imajı için artalanda çalışan depolama sadece %(bytes_written)d " "bayt yazdıktan sonra bağlantıyı kesti" msgid "Cannot use this parameter with the operator IN" msgstr "Bu parametre IN operatörü ile kullanılamıyor" #, python-format msgid "Caught error: %s" msgstr "Yakalama hatası: %s" msgid "Checksum header is missing." msgstr "Sağlama başlığı eksik." #, python-format msgid "Copy from external source '%(scheme)s' failed for image: %(image)s" msgstr "" "İmaj için '%(scheme)s' harici kaynağından kopyalama başarısız oldu: %(image)s" #, python-format msgid "Couldn't find property protection file %(file)s: %(error)s." msgstr "Özellik koruma dosyası %(file)s bulunamadı: %(error)s." #, python-format msgid "Encountered a malformed property protection rule %(rule)s: %(error)s." msgstr "Bozuk bir özellik koruma kuralı %(rule)s: %(error)s ile karşılaşıldı." #, python-format msgid "Error executing SQLite call. Got error: %s" msgstr "SQLite çağrısı yürütülürken hata. 
Alınan hata: %s" msgid "" "Error setting up the debug environment. Verify that the option " "pydev_worker_debug_host is pointing to a valid hostname or IP on which a " "pydev server is listening on the port indicated by pydev_worker_debug_port." msgstr "" "Hata ayıklama ortamı kurma hatası. pydev_worker_debug_host seçeneğinin " "geçerli bir bilgisayar adını gösterdiğini ya da pydev sunucu IP'sinin " "pydev_worker_debug_port ile gösterilen bağlantı noktasını dinlediğini " "onaylayın." #, python-format msgid "Error: cooperative_iter exception %s" msgstr "Hata: imeceli_yineleme hatası %s" #, python-format msgid "" "Exception encountered while tee'ing image '%(image_id)s' into cache: " "%(error)s. Continuing with response." msgstr "" "Önbellekteki '%(image_id)s' imajı boşaltılırken olağandışı durum oluştu: " "%(error)s. Yanıtlarla devam ediyor." #, python-format msgid "Failed to delete blob %(blob_id)s from store: %(exc)s" msgstr "" "Depolama alanından %(blob_id)s blob silme işlemi başarısız oldu: %(exc)s" #, python-format msgid "Failed to delete image %(image_id)s from store: %(exc)s" msgstr "Depodan %(image_id)s imajı silme başarısız oldu: %(exc)s" #, python-format msgid "Failed to delete namespace %(namespace)s " msgstr "Ad alanı %(namespace)s silme işlemi başarısız oldu " #, python-format msgid "Failed to execute introspection %(task_id)s: %(exc)s" msgstr "İçgözlem yürütümü başarısız oldu %(task_id)s: %(exc)s" #, python-format msgid "Failed to execute task %(task_id)s: %(exc)s" msgstr "%(task_id)s görevi yürütülemedi: %(exc)s" #, python-format msgid "Failed to load location strategy module %(module)s: %(e)s" msgstr "%(module)s strateji modülü konumu yüklenemedi: %(e)s" #, python-format msgid "Failed to load the %s executor provided in the config." msgstr "Yapılandırma içinde verilen %s uygulayıcı yüklemesi başarısız oldu." 
#, python-format msgid "Failed to save task %(task_id)s in DB as task_repo is %(task_repo)s" msgstr "" "Veritabanındaki %(task_id)s görevini %(task_repo)s görev_deposu olarak " "kaydetme başarısız oldu" msgid "Failed to upload image data due to HTTP error" msgstr "HTTP hatası nedeniyle imaj veri yüklemesi başarısız oldu" msgid "Failed to upload image data due to internal error" msgstr "İç hata nedeniyle imaj veri yüklemesi başarısız oldu" #, python-format msgid "Forbidden to create resource type. Reason: %(reason)s" msgstr "Kaynak türü oluşturmak yasak. Nedeni: %(reason)s" #, python-format msgid "" "Glance tried all active locations to get data for blob %s but all have " "failed." msgstr "" "Glance %s blob verilerini almak için etkin tüm konumları denedi ancak tümü " "başarısız oldu." #, python-format msgid "" "Glance tried all active locations to get data for image %s but all have " "failed." msgstr "" "Glance %s imajı verilerini almak için etkin bütün konumları denedi ancak " "tümü başarısız oldu." #, python-format msgid "" "Image cache contained image file for image '%s', however the registry did " "not contain metadata for that image!" msgstr "" "İmaj önbelleği, '%s' imajı için imaj dosyası bulundurur, ancak kayıt defteri " "bu imaj için üst veri içermez!" msgid "Internal error occurred while trying to process task." msgstr "Görevi işlemeye çalışırken dahili bir hata meydana geldi." msgid "Invalid format of swift store config cfg" msgstr "Geçersiz hızlı depo yapılandırma cfg biçimi" #, python-format msgid "Invalid store uri for image: %(image_id)s. Details: %(reason)s" msgstr "İmaj için geçersiz depo uri: %(image_id)s. Ayrıntılar: %(reason)s" #, python-format msgid "" "Invalid value '%s' for 'property_protection_rule_format'. The permitted " "values are 'roles' and 'policies'" msgstr "" "'property_protection_rule_format' için geçersiz değer '%s'. 
İzin verilen " "değerler 'roles' ve 'policies'" #, python-format msgid "Invalid value for option user_storage_quota: %(users_quota)s" msgstr "user_storage_quota seçeneği için geçersiz değer: %(users_quota)s" #, python-format msgid "Json schema files not found in %s. Aborting." msgstr "Json şema dosyaları %s içinde bulunamadı. Durduruluyor." #, python-format msgid "" "Malformed property protection rule in [%(prop)s] %(op)s=%(perm)s: '@' and " "'!' are mutually exclusive" msgstr "" "[%(prop)s] %(op)s=%(perm)s bozuk özellik koruma kuralı: '@' ve '!' birbirini " "dışlayandır" #, python-format msgid "" "Multiple policies '%s' not allowed for a given operation. Policies can be " "combined in the policy file" msgstr "" "Belirli bir işlem için izin verilmeyen birden fazla ilke '%s'. İlkeler, ilke " "dosyasında birleştirilebilir" #, python-format msgid "Not respawning child %d, cannot recover from termination" msgstr "Tekrar oluşturulmayan %d, bitişten sonra kurtarılamaz" #, python-format msgid "Operator %s is not supported" msgstr "%s operatörü desteklenmiyor" msgid "" "Please provide no more than one option from this list: --prefer_new, --" "overwrite" msgstr "" "Lütfen bu listeden en fazla bir seçenek veriniz: --prefer_new, --overwrite" #, python-format msgid "" "RPC Call Error: %(val)s\n" "%(tb)s" msgstr "" "RPC Çağrı Hatası: %(val)s\n" "%(tb)s" #, python-format msgid "Received HTTP error while uploading image %s" msgstr "%s imajı yüklenirken HTTP hatası alındı" #, python-format msgid "Registry client request %(method)s %(action)s raised %(exc_name)s" msgstr "" "Kayıt defteri istemci isteği %(method)s %(action)s, %(exc_name)s ortaya " "çıkardı" #, python-format msgid "Task ID %s" msgstr "Görev Numarası %s" #, python-format msgid "" "Task [%(task_id)s] status failed to change from %(cur_status)s to " "%(new_status)s" msgstr "" "[%(task_id)s] görev durumu %(cur_status)s durumundan %(new_status)s durumuna " "değiştirilirken başarısız oldu" #, python-format msgid "Task not 
found for task_id %s" msgstr "%s task_id için görev bulunamadı" #, python-format msgid "" "Task: %(task_id)s failed to import image %(image_id)s to the filesystem." msgstr "Görev: %(task_id)s dosya sistemine %(image_id)s imajını aktaramadı." msgid "" "This execution of Tasks is not setup. Please consult the project " "documentation for more information on the executors available." msgstr "" "Görevlerin yürütümü kurulum değildir. Lütfen mevcut uygulayıcılar hakkında " "daha fazla bilgi için proje belgelerine bakın." #, python-format msgid "" "This task type %(task_type)s is not supported by the current deployment of " "Glance. Please refer the documentation provided by OpenStack or your " "operator for more information." msgstr "" "Bu görev türü %(task_type)s mevcut Glance yaygınlaştırması tarafından " "desteklenmiyor. Daha fazla bilgi için lütfen işletmeninize yada OpenStack " "tarafından sağlanan belgeye başvurunuz." msgid "" "To use --prefer_new or --overwrite you need to combine of these options with " "--merge option." msgstr "" "--prefer_new ya da --overwrite kullanmak için --merge ile bu seçenekleri " "birleştirmeniz gerekir." 
#, python-format msgid "Unable to create image %s" msgstr "%s imajı oluşturulamadı" #, python-format msgid "Unable to delete image %s" msgstr "%s imajı silinemedi" msgid "Unable to get images" msgstr "İmajlar alınamadı" #, python-format msgid "Unable to kill image %(id)s: " msgstr "İmaj %(id)s öldürülemedi: " #, python-format msgid "Unable to load artifacts: %s" msgstr "Ürünler yüklenemedi: %s" #, python-format msgid "Unable to restore artifact %(artifact_id)s: %(e)s" msgstr "Ürün geri yüklenemedi %(artifact_id)s: %(e)s" #, python-format msgid "Unable to restore image %(image_id)s: %(e)s" msgstr "%(image_id)s imajı geri yüklenemedi: %(e)s" #, python-format msgid "Unable to show image %s" msgstr "%s imajı gösterilemedi" #, python-format msgid "Unable to update image %s" msgstr "%s imajı güncellenemedi" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' ya da 'asc' olmalıdır" #, python-format msgid "could not find %s" msgstr "%s bulunamadı" #, python-format msgid "swift config file %(conf_file)s:%(exc)s not found" msgstr "hızlı yapılandırma dosyası %(conf_file)s:%(exc)s bulunamadı" glance-12.0.0/glance/locale/tr_TR/LC_MESSAGES/glance.po0000664000567000056710000027550612701407047023305 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andreas Jaeger , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev41\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 00:22+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-04 12:45+0000\n" "Last-Translator: openstackjenkins \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s karakter dizisi olmalıdır" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s gereklidir" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s %(length)i'den daha uzun olmayabilir" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s %(length)i'den daha kısa olmayabilir" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s %(pattern)s şablonu ile eşleşmelidir" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Son rpc çağrısında %(cls)s istisnası oluştu: %(val)s" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) çalıştırılıyor..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s çalışıyor görünüyor: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s bir birim olarak iki kez kaydedildi. %(module)s kullanılmıyor." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " "yapılandırılamadı. 
Dosya sistem deposuna yüklenemedi" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s görev türündeki %(task_id)s düzgün bir şekilde " "yapılandırılamadı. Eksik çalışma dizini: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(conf)s ile %(verb)sing %(serv)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Lütfen istemcinin bir IPv4, IPv6 adresi, makine adı ya da FQDN olduğu bir " "istemci:bağlantı noktası çifti belirtin. Eğer IPv6 kullanılırsa, bağlantı " "noktasından ayrı parantez içine alın (örneğin, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 4 bayt unicode karakterler içeremez." #, python-format msgid "%s is already stopped" msgstr "%s zaten durdurulmuş" #, python-format msgid "%s is stopped" msgstr "%s durduruldu" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "'%(param)s' değeri aralık dışında, %(max)d değerini geçmemelidir" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "--os_auth_url seçeneği ya da OS_AUTH_URL ortam değişkeni, keystone kimlik " "doğrulama stratejisi etkinken gereklidir\n" msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "Biçim adı ve ad-sürümünde izin verilen ürünlerin listesi. Boş liste her ürün " "yüklenebilir anlamına gelir." 
#, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(object_name)s ile bir metadata tanım nesnesi ad alanında=" "%(namespace_name)s zaten var." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(property_name)s ile bir metadata tanım özelliği ad alanında=" "%(namespace_name)s zaten mevcut." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ad=%(resource_type_name)s ile bir metadata tanım kaynak-türü zaten mevcut." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Ad=%(name)s ile bir metadata etiketi ad alanında=%(namespace_name)s zaten " "mevcut." msgid "A set of URLs to access the image file kept in external store" msgstr "Harici depoda tutulan imaj dosyasına erişilecek URL kümesi" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "Depo 'location' metadatasını şifrelemek için AES anahtarı. Bu, eğer " "kullanılırsa, Swift ya da S3 kimlik bilgilerini içerir. 16, 24 ya da 32 bayt " "uzunluğunda rastgele bir karakter dizisi ayarlanmış olmalıdır" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Sunucuya bağlanılacak adres. Belli bir ağ arayüzü seçerken kullanışlıdır." msgid "Address to find the registry server." msgstr "Kayıt sunucusunu bulmak için adres." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Kimlik doğrulaması yapılamamış kullanıcıların API'lere salt okunur haklarla " "erişmesine izin ver. 
Bu sadece ContextMiddleware kullanılırken uygulanır." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "İzin verilen değerler %s verilen doğrulayıcılar altında geçersizdir" msgid "Amount of disk space (in GB) required to boot image." msgstr "İmajı ön yüklemek için gereken disk alanı miktarı (GB olarak)." msgid "Amount of ram (in MB) required to boot image." msgstr "İmaj ön yüklemesi için gereken (MB olarak) bellek miktarı." msgid "An identifier for the image" msgstr "İmaj için bir tanımlayıcı" msgid "An identifier for the image member (tenantId)" msgstr "İmaj üyesi için bir tanımlayıcı (tenantId)" msgid "An identifier for the owner of this task" msgstr "Görevin sahibi için bir tanımlayıcı" msgid "An identifier for the task" msgstr "Görev için bir tanımlayıcı" #, python-format msgid "An image with identifier %s already exists" msgstr "%s belirteçli imaj zaten var" msgid "An object with the same identifier already exists." msgstr "Aynı tanımlayıcı ile bir nesne zaten mevcut." msgid "An object with the same identifier is currently being operated on." msgstr "Aynı tanımlayıcıya sahip bir nesne şu anda işleniyor." msgid "An object with the specified identifier was not found." msgstr "Belirtilen tanımlayıcı ile bir nesne bulunamadı." 
msgid "An unknown exception occurred" msgstr "Bilinmeyen olağandışı bir durum oluştu" msgid "An unknown task exception occurred" msgstr "Bilinmeyen bir görev olağandışı durumu oluştu" #, python-format msgid "Array has no element at position %d" msgstr "Dizi %d konumunda öge içermez" msgid "Array property can't have item_type=Array" msgstr "Dizi özelliği item_type=Array olamaz" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "Ürün %s kullanımda olduğundan dolayı silinemez: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "Ürün durumunu %(source)s kaynağından %(target)s hedefine değiştiremez" #, python-format msgid "Artifact has no property %(prop)s" msgstr "Ürün %(prop)s özelliğine sahip değil" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "Ürün durumu %(curr)s durumundan %(to)s durumuna değiştirilemez" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "'%(name)s' ad ve '%(version)s' sürümdeki ürün bilinmiyor" msgid "Artifact with a circular dependency can not be created" msgstr "Döngüsel bağımlılık ile ürün oluşturulamaz" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "id=%(id)s ile ürün erişilebilir değil" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "id=%(id)s ile ürün bulunamadı" msgid "Artifact with the specified type, name and version already exists" msgstr "Belirtilen tür, ad ve sürümde ürün zaten var" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "Belirtilen tür, ad ve sürümde ürün zaten doğrudan bağımlılığa=%(dep)s sahip" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "Belirtilen tür, ad ve sürümdeki ürün, zaten geçişli bağımlılığa=%(dep)s 
sahip" msgid "Attempt to set readonly property" msgstr "Salt okunur özellik ayarlamayı dene" msgid "Attempt to set value of immutable property" msgstr "Değişmez özelliğin değerini ayarlamaya çalışın" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Çift imaj yüklemeyi dene: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Durumu kuyruğa alınmış olmayan bir imaj için Konum alanı güncellemesi " "denendi." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "'%(property)s' özniteliği salt okunurdur." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "'%(property)s' özniteliği ayrılmıştır." #, python-format msgid "Attribute '%s' is read-only." msgstr "'%s' özniteliği salt okunurdur." #, python-format msgid "Attribute '%s' is reserved." msgstr "'%s' özniteliği ayrılmıştır." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "container_format özniteliği sadece kuyruğa alınmış bir imaj için " "değiştirilebilir." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "disk_format özniteliği sadece kuyruğa alınmış bir imaj için değiştirilebilir." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" "Swift kimlik doğrulama servisine dayalı kullanıcı doğrulama için yetkili " "anahtar. (önerilmiyor)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "%(url)s URL'inde kimlik doğrulama servisi bulunamadı." msgid "Authorization failed." msgstr "Yetkilendirme başarısız oldu." 
msgid "Available categories:" msgstr "Kullanılabilir kategoriler:" #, python-format msgid "Bad Command: %s" msgstr "Hatalı Komut: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Kötü başlık: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "%(filter)s süzgecine geçirilen hatalı değer %(val)s var" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "Hatalı oluşturulmuş S3 URI: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Swift URI içinde hatalı oluşturulmuş kimlik bilgileri '%(creds)s'" msgid "Badly formed credentials in Swift URI." msgstr "Swift URI içinde hatalı oluşturulmuş kimlik bilgileri." #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Blob %(name)s birden çok değeri olmayabilir" msgid "Blob size is not set" msgstr "Blob boyutu ayarlı değil" msgid "Body expected in request." msgstr "İstekte beklenen vücut." msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "Aynı anda hem dosya hem de legacy_image_id belirtilemeyebilir" msgid "CA certificate file to use to verify connecting clients." msgstr "" "Bağlanan istemcileri doğrulamak için kullanılacak CA sertifika dosyası." msgid "Cannot be a negative value" msgstr "Negatif bir değer olamaz" msgid "Cannot be a negative value." msgstr "Negatif bir değer olamaz." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "%(key)s '%(value)s' imaj değeri bir tam sayıya dönüştürülemez." 
msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "Ayrılmış ad 'metadata' ile ürün özelliği ifade edilemez" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "'%(name)s' ürünü yüklenemez" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "%(image_id)s imajı için veri kaydedilemiyor: %(error)s" msgid "Cannot specify 'max_size' explicitly" msgstr "'max_size' Açıkça belirtilemez" msgid "Cannot specify 'min_size' explicitly" msgstr "'min_size' açıkça belirtilemez" msgid "Cannot upload to an unqueued image" msgstr "Kuyruğa alınmamış imaj yüklenemez" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "%(op)s operatörü ile bu parametreler kullanılamaz" msgid "Certificate file to use when starting API server securely." msgstr "API sunucusu güvenli başlatılırken kullanılacak sertifika dosyası." #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Sağlama doğrulama başarısız oldu. '%s' imajını önbelleğe alma işlemi " "durduruldu." msgid "Client disconnected before sending all data to backend" msgstr "" "İstemci tüm verileri art alanda çalışan uygulamaya göndermeden önce " "bağlantıyı kesti" msgid "Command not found" msgstr "Komut bulunamadı" msgid "Configuration option was not valid" msgstr "Yapılandırma seçeneği geçerli değildi." #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "%(url)s URL'indeki kimlik doğrulama servisine bağlantı hatası/hatalı istek." #, python-format msgid "Constructed URL: %s" msgstr "URL inşa edildi: %s" msgid "Container format is not specified." msgstr "Kap biçimi belirtilmemiş." 
msgid "Content-Type must be application/octet-stream" msgstr "İçerik-Türü uygulama/sekiz bitli bayt akışı olmalıdır" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "%(image_id)s imajı için bozuk imaj indir" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30 saniyelik denemeden sonra %(host)s:%(port)s bağlanamadı" #, python-format msgid "Could not find metadata object %s" msgstr "Metadata nesnesi %s bulunamadı" #, python-format msgid "Could not find metadata tag %s" msgstr "%s metadata etiketi bulunamadı" #, python-format msgid "Could not find namespace %s" msgstr "%s ad alanı bulunamadı" #, python-format msgid "Could not find property %s" msgstr "%s özelliği bulunamadı" msgid "Could not find required configuration option" msgstr "Gerekli yapılandırma seçeneği bulunamadı" #, python-format msgid "Could not find task %s" msgstr "%s görevi bulunamadı" #, python-format msgid "Could not update image: %s" msgstr "İmaj güncellenemiyor: %s" msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "Özel doğrulayıcılar listesi birden çok öge içermelidir '(function, message)'" msgid "Data supplied was not valid." msgstr "Sağlanan veri geçersizdir." 
msgid "Date and time of image member creation" msgstr "İmaj üyesi oluşturma tarih ve saati" msgid "Date and time of last modification of image member" msgstr "İmaj üyesi son değişiklik tarih ve saati" msgid "Datetime when this resource was created" msgstr "Bu kaynak oluşturulduğundaki tarih saat" msgid "Datetime when this resource was updated" msgstr "Bu kaynak güncellendiğindeki tarih saat" msgid "Datetime when this resource would be subject to removal" msgstr "Bu kaynağın kaldırılacağı tarih zaman" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Eğer istekte açıkça belirtilmediyse, bir istekten dönen öge sayısı için " "öntanımlı değer" msgid "Default value is invalid" msgstr "Öntanımlı değer geçersizdir" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "Ürün yükleme girişimi kota aşıldığından dolayı reddediliyor: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "İmaj yükleme girişimi kotayı aştığından dolayı reddediliyor: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "%d bayttan büyük bir imajın yükleme girişimi reddediliyor." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "İlk önce '%s' bağımlılık özelliğinin silinmesi gerekir" msgid "Dependency relations cannot be mutable" msgstr "Bağımlılık ilişkileri değişken olamaz" msgid "Deploy the v1 OpenStack Images API." msgstr "v1 OpenStack İmajları API dağıt." msgid "Deploy the v1 OpenStack Registry API." msgstr "v1 OpenStack Kayıt Defteri API dağıt." msgid "Deploy the v2 OpenStack Images API." msgstr "v2 OpenStack İmajları API dağıt." msgid "Deploy the v2 OpenStack Registry API." msgstr "v2 OpenStack Kayıt Defteri API dağıt." 
msgid "Descriptive name for the image" msgstr "İmaj için açıklayıcı ad" msgid "Dictionary contains unexpected key(s)" msgstr "Sözlük beklenmeyen anahtar(lar) içerir" msgid "Dictionary size is greater than maximum" msgstr "Sözlük boyutu azami değerden daha büyüktür" msgid "Dictionary size is less than minimum" msgstr "Sözlük boyutu asgari değerden daha azdır" msgid "Disk format is not specified." msgstr "Disk biçimi belirtilmemiş." msgid "Does not match pattern" msgstr "Desen eşleşmez" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "%(driver_name)s sürücüsü düzgün bir şekilde yapılandırılamadı. Nedeni: " "%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Ya bir dosya ya da legacy_image_id belirtilmesi gerekir" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Depolama yapılandırmasında hata. Depolamak için imaj ekleme devre dışıdır." #, python-format msgid "External sources are not supported: '%s'" msgstr "Harici kaynaklar desteklenmiyor: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "İmaj etkinleştirme işlemi başarısız oldu. Alınan hata: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "İmaj metadata ekleme işlemi başarısız oldu. 
Alınan hata: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Silinecek %(artifact_id)s ürünü bulunamadı" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Silinecek %(image_id)s imajını bulma işlemi başarısız oldu" #, python-format msgid "Failed to find image to delete: %s" msgstr "Silinecek imaj bulunamadı: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Güncellenecek imaj bulma işlemi başarısız oldu: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Silinecek %(resourcetype)s kaynak türü bulma işlemi başarısız oldu" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "İmaj önbellek veritabanı başlatılamadı. Alınan hata: %s" #, python-format msgid "Failed to read %s from config" msgstr "Yapılandırmadan %s okunamadı" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "İmaj ayırma işlemi başarısız oldu. Alınan hata: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "İmaj metadata güncelleme işlemi başarısız oldu: Alınan hata: %s" #, python-format msgid "Failed to upload image %s" msgstr "%s imajı yükleme işlemi başarısız oldu" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP hatası nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " "%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Dahili hata nedeniyle %(image_id)s imajı için imaj verisi yüklenemedi: " "%(error)s" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Dosya tabanlı içeri aktarmlara izin verilmez. Lütfen imaj verilerinin yerel " "olmayan bir kaynağını kullanın." 
msgid "File too large" msgstr "Dosya çok büyük" msgid "File too small" msgstr "Dosya çok küçük" #, python-format msgid "Forbidden to delete a %s image." msgstr "%s imajını silmek yasak." #, python-format msgid "Forbidden to delete image: %s" msgstr "İmaj silmek yasak: %s" msgid "Forbidden to reserve image." msgstr "İmaj ayırmak yasak." msgid "Forbidden to update deleted image." msgstr "Silinen imajın güncellenmesi yasak." #, python-format msgid "Forbidden to update image: %s" msgstr "İmaj güncellemek yasak: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Yükleme girişimi yasak: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "Yasak istek, üstveri tanım ad alanı=%s görünür değil." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Yasak istek, %s görevi görünür değil" msgid "Format of the container" msgstr "Kabın biçimi" msgid "Format of the disk" msgstr "Diskin biçimi" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "blob %(name)s verisi alma işlemi başarısız oldu: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "%(id)s imaj verisi alınamadı: %(err)s." #, python-format msgid "Host \"%s\" is not valid." msgstr "İstemci \"%s\" geçersizdir." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "İstemci ve bağlantı noktası \"%s\" geçersizdir." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Okunabilir bilgilendirme iletisi sadece uygun olduğunda (genellikle " "başarısızlıkta) dahildir" msgid "If False doesn't trace SQL requests." msgstr "Eğer Yanlışsa SQL istekleri izlenmez." msgid "If False fully disable profiling feature." msgstr "Eğer Yanlışsa, profil özelliği tamamen devre dışı. " msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. 
In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Sunucu, eğer Yanlışsa, \"Bağlantı: kapat\" başlığını, eğer Doğruysa, " "\"Bağlantı: Aktif\" yanıtını döndürecek. Yanıt gönderildikten ve istemci " "tarafından başarılı bir şekilde okunduktan sonra, istemci soket bağlantısını " "açıkça kapatmak için, bir wsgi sunucusu oluştururken bu seçeneği sadece " "Yanlış olarak ayarlamanız gerekir." msgid "If true, image will not be deletable." msgstr "Eğer seçiliyse, imaj silinemeyecektir." msgid "If true, namespace will not be deletable." msgstr "Eğer seçiliyse, ad alanı silinemeyecektir." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "%(id)s imajı kullanımda olduğundan dolayı silinemedi: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "%(id)s imajı bulunamadı" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "%(image_id)s imajı yüklemeden sonra bulunamadı. İmaj yükleme sırasında " "silinmiş olabilir: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "%(image_id)s imajı korumalıdır ve silinemez." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "%s imajı yüklendikten sonra bulunamadı. İmaj yükleme sırasında silinmiş, " "yüklenen parçalar temizlenmiş olabilir." #, python-format msgid "Image %s is deactivated" msgstr "%s imajı devrede değil" #, python-format msgid "Image %s is not active" msgstr "%s imajı etkin değil" #, python-format msgid "Image %s not found." msgstr "%s imajı bulunamadı." 
#, python-format msgid "Image exceeds the storage quota: %s" msgstr "İmaj depolama kotasını aşar: %s" msgid "Image id is required." msgstr "İmaj kimliği gereklidir." msgid "Image is protected" msgstr "İmaj korumalıdır" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "%(id)s imajı için üye sınırı aşıldı: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "İmaj adı çok uzun: %d" msgid "Image operation conflicts" msgstr "İmaj işlem çatışmaları" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s durumundan %(new_status)s durumuna imaj durum geçişine izin " "verilmez" #, python-format msgid "Image storage media is full: %s" msgstr "İmaj depolama ortamı dolu: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "%(id)s imajı için etiket sınırı aşıldı: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "İmaj yükleme sorunu: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "%s tanımlayıcısı ile imaj zaten mevcut!" #, python-format msgid "Image with identifier %s has been deleted." msgstr "%s tanımlayıcılı imaj silindi." 
#, python-format msgid "Image with identifier %s not found" msgstr "%s tanımlayıcısı ile imaj bulunamadı" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Verilen %(image_id)s ile imaj bulunamadı" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Hatalı yetki stratejisi, beklenen değer, \"%(expected)s\" ancak alınan " "değer, \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Hatalı istek: %s" msgid "Index is out of range" msgstr "İndis aralık dışındadır" msgid "Index is required" msgstr "İndis gereklidir" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Girdi '%(key)s' alanı içermez" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "İmaj depolama ortamında yetersiz izinler: %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Bu kaynak için geçersiz JSON işaretçisi: '/%s'" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "Geçersiz sağlama '%s': 32 karakterden uzun olamaz" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift yapılandırma dosyasında geçersiz yapılandırma." msgid "Invalid configuration in property protection file." msgstr "Özellik koruma dosyasında geçersiz yapılandırma." #, python-format msgid "Invalid container format '%s' for image." msgstr "İmaj için geçersiz kap biçimi '%s'." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Geçersiz içerik türü %(content_type)s" msgid "Invalid dict property type" msgstr "Geçersiz sözlük özellik türü" msgid "Invalid dict property type specification" msgstr "Geçersiz sözlük özellik türü belirtimi" #, python-format msgid "Invalid disk format '%s' for image." msgstr "İmaj için geçersiz disk biçimi '%s'." 
msgid "Invalid image id format" msgstr "Geçersiz imaj id biçimi" msgid "Invalid item type specification" msgstr "Geçersiz öge türü belirtimi" #, python-format msgid "Invalid json body: %s" msgstr "Geçersiz json gövdesi: %s" msgid "Invalid jsonpatch request" msgstr "Geçersiz jsonpatch isteği" msgid "Invalid location" msgstr "Geçersiz konum" #, python-format msgid "Invalid location %s" msgstr "Geçersiz konum %s" #, python-format msgid "Invalid location: %s" msgstr "Geçersiz konum: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Geçersiz location_strategy seçeneği: %(name)s. Geçerli strateji " "seçenek(leri): %(strategies)s" msgid "Invalid locations" msgstr "Geçersiz konumlar" #, python-format msgid "Invalid locations: %s" msgstr "Geçersiz konumlar: %s" msgid "Invalid marker format" msgstr "Geçersiz işaretçi biçimi" msgid "Invalid marker. Image could not be found." msgstr "Geçersiz işaretçi. İmaj bulunamadı." #, python-format msgid "Invalid membership association: %s" msgstr "Geçersiz üyelik ilişkisi: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Geçersiz disk ve kap biçimleri karışımı. Bir disk ya da kap biçimi 'aki', " "'ari' ya da 'ami' biçimlerinden biri olarak ayarlanırsa, kap ve disk biçimi " "eşleşmelidir." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Geçersiz işlem: `%(op)s`. Şu seçeneklerden biri olmalıdır: %(available)s." msgid "Invalid position for adding a location." msgstr "Yer eklemek için geçersiz konum." msgid "Invalid position for removing a location." msgstr "Yer kaldırmak için geçersiz konum." 
msgid "Invalid property definition" msgstr "Geçersiz özellik tanımı" msgid "Invalid reference list specification" msgstr "Geçersiz kaynak listesi belirtimi" msgid "Invalid referenced type" msgstr "Geçersiz kaynak türü" msgid "Invalid service catalog json." msgstr "Geçersiz json servis katalogu." #, python-format msgid "Invalid sort direction: %s" msgstr "Geçersiz sıralama yönü: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Geçersiz sıralama anahtarı: %(sort_key)s. Eğer tür sürümü ayarlanmazsa, " "şunlardan biri olmalıdır: %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Geçersiz sıralama anahtarı: %(sort_key)s. Şu seçeneklerden biri olmalıdır: " "%(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Geçersiz sıralama anahtarı: %(sort_key)s. Bu özelliklere göre " "sıralayamazsınız" #, python-format msgid "Invalid status value: %s" msgstr "Geçersiz durum değeri: %s" #, python-format msgid "Invalid status: %s" msgstr "Geçersiz durum: %s" msgid "Invalid type definition" msgstr "Geçersiz tür tanımı" #, python-format msgid "Invalid type value: %s" msgstr "Geçersiz tür değeri: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Geçersiz güncelleme. Aynı %s adıyla çift metadata tanım ad alanı ile " "sonuçlanır" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Geçersiz güncelleme. Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s " "ile çift metadata tanım nesnesi olmasına neden olacaktır." #, python-format msgid "" "Invalid update. 
It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Geçersiz güncelleme. Ad alanında=%(namespace_name)s aynı ad=%(name)s ile " "çift metadata tanım nesnesi ile sonuçlanır." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Geçersiz güncelleme. Ad alanı=%(namespace_name)s içinde aynı ad=%(name)s ile " "çift metadata tanım özelliği olmasına neden olacaktır." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "'%(param)s' parametresi için '%(value)s' geçersiz değeri: %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "%(option)s seçeneği için geçersiz değer: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Geçersiz görünürlük değeri: %s" msgid "Is not allowed value" msgstr "Değere izin verilmez" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Eventlet modülü %s='yes' ayarından önce içeri aktarılmış görünüyor. Eğer " "eventlet.greendns ipv6 adresleriyle ilişkiyi kestiğinden beri ipv6 " "kullanılıyorsa, şu anda eventlet.greendns'i devre dışı bırakmanız gerekir. " "Lütfen eventlet modülünün bu ayardan önce içeri aktarılmadığından emin olun." msgid "It's invalid to provide multiple image sources." msgstr "Birden fazla imaj kaynağı sağlamak için geçersizdir." msgid "Items have to be unique" msgstr "Ögeler eşsiz olmalıdır" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "" "Json yolu '/' ile başlamalı, '/' ile sonlanmamalıdır, 2 sonraki '/' izin " "verilmez." msgid "Legacy image was not found" msgstr "Eski imaj bulunamadı" msgid "Length is greater than maximum" msgstr "Uzunluk azami değerden daha büyüktür" msgid "Length is less than minimum" msgstr "Uzunluk asgari değerden daha küçüktür" msgid "Limit param must be an integer" msgstr "Sınır parametresi bir tam sayı olmalıdır" msgid "Limit param must be positive" msgstr "Sınır parametresi pozitif olmalıdır" #, python-format msgid "Limit param must not be higher than %d" msgstr "Sınır parametresi %d değerinden daha yüksek olmamalıdır" msgid "List definitions may hot have defaults" msgstr "Liste tanımları öntanımlılar olmayabilir" msgid "List of strings related to the image" msgstr "İmaj ile ilgili karakter dizilerinin listesi" msgid "List size is greater than maximum" msgstr "Liste boyutu azami değerden daha büyüktür" msgid "List size is less than minimum" msgstr "Liste boyutu asgari değerden daha azdır" msgid "Loop time between checking for new items to schedule for delete." msgstr "Silmek için planlanan yeni ögelerin kontrolü arasındaki döngü süresi." #, python-format msgid "Malformed Content-Range header: %s" msgstr "Bozuk İçerik-Aralık başlığı: %s" msgid "Malformed JSON in request body." msgstr "İstek gövdesinde bozuk JSON." msgid "Max string length may not exceed 255 characters" msgstr "Azami karakter dizisi uzunluğu 255 karakter sınırını aşmayabilir" msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "İleti başlığının kabul edilebilir azami satır boyutu. Büyük jetonlar (tipik " "olarak büyük servis katalogları ile Keystone v3 API tarafından oluşturulur) " "kullanılırken, max_header_line değişkeninin arttırılması gerekebilir" msgid "" "Maximum number of image members per image. 
Negative values evaluate to " "unlimited." msgstr "" "İmaj başına üyelerin azami sayısı. Negatif değerler sınırsız olarak " "değerlendirilir." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Bir imaj üzerinde izin verilen azami konum sayısı. Negatif değerler sınırsız " "olarak değerlendirilir." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Bir imaj üzerinde izin verilen azami özellik sayısı. Negatif değerler " "sınırsız olarak değerlendirilir." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Bir imaj üzerinde izin verilen azami etiket sayısı. Negatif değerler " "sınırsız olarak değerlendirilir." msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Bir istek tarafından geri döndürülebilen ögelerin izin verilebilir azami " "sayısı" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Yeniden yönlendirmelerin sınırı (%(redirects)s) aşıldı." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "Bir kullanıcının bayt cinsinde yükleyebileceği azami imaj boyutu. Öntanımlı " "olarak 1099511627776 bayt (1 TB). UYARI: bu değer sadece dikkatli bir " "değerlendirmeden sonra arttırılmalı ve 8 EB (9223372036854775808) altında " "bir değere ayarlanmalıdır." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Üye %(member_id)s %(image_id)s imajı için çoğaltıldı" msgid "Member can't be empty" msgstr "Üye boş olamaz" msgid "Member to be added not specified" msgstr "Eklenecek üye belirtilmemiş" msgid "Membership could not be found." msgstr "Üyelik bulunamadı." 
#, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "Metadata tanım ad alanı %(namespace)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s için metadata tanım ad alanı bulunamadı" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "Metadata tanım ad alanı=%(namespace_name)s bulunamadı." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "Metadata tanım nesnesi %(object_name)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s için metadata tanım nesnesi bulunamadı" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "Metadata tanım özelliği %(property_name)s korumalıdır ve silinemez." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s için metadata tanım özelliği bulunamadı" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" " %(resource_type_name)s metadata tanım kaynak-türü sınıflanmış bir sistem " "türüdür ve silinemez." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Metadata tanım kaynak-tür-ilişkisi %(resource_type)s korumalıdır ve " "silinemez." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "Metadata tanım etiketi %(tag_name)s korumalıdır ve silinemez." 
#, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s için metadata tanım etiketi bulunamadı" msgid "Min string length may not be negative" msgstr "Asgari karakter dizisi uzunluğu negatif olmayabilir" #, python-format msgid "Missing required credential: %(required)s" msgstr "Gerekli olan kimlik eksik: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "%(region)s bölgesi için birden fazla 'image' servisi eşleşir. Bu genellikle, " "bir bölgenin gerekli olduğu ve sağlamadığınız anlamına gelir." msgid "Name of the paste configuration file." msgstr "Yapıştırma yapılandırma dosyanızın adı." #, python-format msgid "No artifact found with ID %s" msgstr "%s ile ürün bulunamadı" msgid "No authenticated user" msgstr "Kimlik denetimi yapılmamış kullanıcı" #, python-format msgid "No image found with ID %s" msgstr "%s bilgileri ile hiçbir imaj bulunamadı" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "%(img)s imajından %(loc)s bilgisi ile hiçbir konum bulunamadı" msgid "No permission to share that image" msgstr "Bu imajı paylaşma izni yok" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "'%(name)s' için hiçbir eklenti yüklenmemişti" msgid "No property to access" msgstr "Erişilecek özellik yok" #, python-format msgid "No such key '%s' in a dict" msgstr "Sözlük içinde '%s' anahtarı yoktur" #, python-format msgid "Not a blob property '%s'" msgstr "'%s' bir blob özelliği değil" msgid "Not a downloadable entity" msgstr "İndirilebilir bir varlık değil" msgid "Not a list property" msgstr "Bir liste özelliği değil" #, python-format msgid "Not a list property '%s'" msgstr "'%s' bir liste özelliği değil" msgid "Not a valid value type" msgstr "Geçerli bir değer türü değildir" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Bütün 
bağımlılıklar '%s' durumunda değildir" #, python-format msgid "Not allowed to create members for image %s." msgstr "%s imajı için üye oluşturulmasına izin verilmedi." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "'%s' durumundaki imajın etkinliğini kaldırmaya izin verilmez" #, python-format msgid "Not allowed to delete members for image %s." msgstr "%s imajı için üyelerin silinmesine izin verilmedi." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "%s imajı için etiketlerin silinmesine izin verilmedi." #, python-format msgid "Not allowed to list members for image %s." msgstr "%s imajı için üyelerin listelenmesine izin verilmedi." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "'%s' durumundaki imajı yeniden etkinleştirmeye izin verilmez" #, python-format msgid "Not allowed to update members for image %s." msgstr "%s imajı için üyelerin güncellenmesine izin verilmedi." #, python-format msgid "Not allowed to update tags for image %s." msgstr "%s imajı için etiketlerin güncellenmesine izin verilmez." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "%(image_id)s imajı için imaj verisi yüklenmesine izin verilmedi: %(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "'%s' bir dizi idx değildir" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Sıralama dizinlerinin sayısı, sıralama anahtarlarının sayısıyla eşleşmez" msgid "Old and new sorting syntax cannot be combined" msgstr "Eski ve yeni sıralama sözdizimi birleştirilemez" msgid "Only list indexes are allowed for blob lists" msgstr "Blob listeleri için sadece liste indislerine izin verilir" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "\"%s\" işlemi \"değer\" olarak adlandırılan bir üye ister." 
msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "İşlem nesneleri \"ekle\", \"kaldır\" ya da \"değiştir\" olarak adlandırılan " "tam olarak bir üye içermelidir." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "İşlem nesneleri, \"ekle\", \"kaldır\" ya da \"değiştir\" olarak adlandırılan " "sadece bir üye içermelidir." msgid "Operations must be JSON objects." msgstr "İşlemler JSON nesnesi olmalıdır." #, python-format msgid "Operator %(op)s is not supported" msgstr "Operatör %(op)s desteklenmiyor" #, python-format msgid "Original locations is not empty: %s" msgstr "Özgün konumlar boş değil: %s" msgid "Owner must be specified to create a tag." msgstr "Etiket oluşturmak için sahibi belirtilmelidir." msgid "Owner of the image" msgstr "İmajın sahibi" msgid "Owner of the namespace." msgstr "Ad alanı sahibi." msgid "Param values can't contain 4 byte unicode." msgstr "Param değerleri 4 bayt unikod içermez." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Servis adı ile yapıştırma yapılandırma dosyanızdaki iletişim hattının " "parçalı adı kaldırıldı. Örnek, eğer yapıştırma bölüm adınız [pipeline:glance-" "api-keystone] ise \"keystone\" değerini kullan" msgid "Path to the directory where json metadata files are stored" msgstr "Json metadata dosyalarının depolandığı dizin yolu" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "Eklenti adı '%(plugin)s' ile ürün tür adı '%(name)s' eşleşmelidir" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "`%s` işaretçisi tanınmayan bir vazgeçme dizisinin parçası olmayan \"~\" " "içerir." 
#, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "`%s` işaretçisi bitişik \"/\" içerir." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "`%s`işaretçisi geçerli jeton içermez." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "`%s` işaretçisi \"/\" ile başlamaz." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "`%s` işaretçisi \"/\" ile sonlanır." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "İşaretçi tanınmış [~0, ~1] kaçış dizisi parçası olmayan '~' içerir." #, python-format msgid "Port \"%s\" is not valid." msgstr "Bağlantı noktası \"%s\" geçersizdir." msgid "Port the registry server is listening on." msgstr "Kayıt sunucusunun üzerinde dinlediği bağlantı noktası." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "Önsürüm sayısal bileşeni çok büyük (en fazla %d karakter)" msgid "Private key file to use when starting API server securely." msgstr "API sunucusu güvenli başlatılırken kullanılacak özel anahtar dosyası." #, python-format msgid "Process %d not running" msgstr "%d süreci çalışmıyor" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "%s özellikleri veri kaydetmeden önce ayarlanmış olmalıdır." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "%(property_name)s özelliği beklenen kaynak tür ilişkilendirme ön eki " "'%(prefix)s' ile başlamaz." #, python-format msgid "Property %s already present." msgstr "Özellik %s zaten mevcut." #, python-format msgid "Property %s does not exist." msgstr "Özellik %s mevcut değil." #, python-format msgid "Property %s may not be removed." msgstr "Özellik %s kaldırılamayabilir." #, python-format msgid "Property %s must be set prior to saving data." msgstr "%s özelliği veri kaydetmeden önce ayarlanmış olmalıdır." 
#, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "'%(name)s' özelliği '%(val)s' değerine sahip olmayabilir: %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "'%s' özelliği korumalıdır" msgid "Property names can't contain 4 byte unicode." msgstr "Özellik adları 4 bayt unicode içeremez." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "%(rule)s kuralı için %(operation)s işleminde özellik koruma bulunamadı. Bu " "işlemi gerçekleştirmek için hiçbir role izin verilmeyecektir." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "Özelliklerin %(prop)s değeri bulunamadı" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "Sağlanan imaj boyutu depolanan imaj boyutu ile eşleşmelidir. (sağlanan " "boyut: %(ps)d, depolanan boyut: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Sağlanan nesne '%(schema)s' şeması ile eşleşmez: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Sağlanan görev durumu desteklenmiyor: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Sağlanan görev türü desteklenmiyor: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Ad alanı için kullanıcı dostu bir açıklama sağlar." msgid "Public images do not have members." msgstr "Ortak imajlar üyelere sahip değil." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "Sürüm uç noktaları için kullanılacak ortak url. 
Öntanımlı olan Hiçbiri, URL " "tabanını doldurmak için isteğin host_url özniteliğini kullanacak. Eğer " "Glance bir vekil sunucu arkasında işletiliyorsa, vekil sunucunun URL'ini " "gösterecek şekilde değiştirmek isteyeceksiniz." msgid "Python module path of data access API" msgstr "Veri erişim API'sinin Python modül yolu" msgid "Received invalid HTTP redirect." msgstr "Geçersiz HTTP yeniden yönlendirme isteği alındı." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Yetkilendirme için %(uri)s adresine yeniden yönlendiriliyor." #, python-format msgid "Registry service can't use %s" msgstr "Kayıt defteri servisi %s kullanamaz" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Kayıt defteri API sunucusunda doğru bir şekilde yapılandırılamadı. Nedeni: " "%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "%(name)s bağlantısının birden çok değeri olmayabilir" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s yeniden yükleme desteklenmiyor" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) yeniden yükleniyor" #, python-format msgid "Removing stale pid file %s" msgstr "Bozuk pid dosyası %s kaldırılıyor" msgid "Request body must be a JSON array of operation objects." msgstr "İstek vücudu işlem nesnelerinin bir JSON dizisi olmalıdır." msgid "Request must be a list of commands" msgstr "İstek komutların bir listesi olmalıdır" #, python-format msgid "Required store %s is invalid" msgstr "İstenen depo %s geçersizdir" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." 
"html" msgstr "" "Kaynak tür adları her fırsatta, Heat kaynak türleri ile hizalanmalıdır: " "http://docs.openstack.org/developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone yanıtı bir Glance uç noktası içermiyor." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Rol, kimliği doğrulanmış bir kullanıcıyı yönetici olarak belirlemek için " "kullanılır." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "Uzun sürecek bir işlem olarak çalıştır. Belirtilmezse (öntanımlı) iptal " "işlemi bir kez çalıştırılır ve sonrasında çıkar. Eğer belirtilirse, çıkmaz " "ve yapılandırma ayarında belirtilen wakeup_time aralığında çalıştırılır." msgid "Scope of image accessibility" msgstr "İmaj erişilebilirlik kapsamı" msgid "Scope of namespace accessibility." msgstr "Ad alanı erişebilirlik kapsamı." #, python-format msgid "Server %(serv)s is stopped" msgstr "Sunucu %(serv)s durdurulur" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Sunucu işçisi oluşturma işlemi başarısız oldu: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Her kullanıcı için geniş bir sistem kotası ayarla. Bu değer, bir " "kullanıcının bütün depolama sistemlerinde kullanabileceği toplam " "kapasitedir. 0 değerinin anlamı, limitsizdir. 
Değer için isteğe bağlı birim " "belirtilebilir. Beklenen birimler B, KB, MB, GB ve TB, sırasıyla bayt, " "KiloBayt, MegaBayt, GigaBayt ve TeraBayttır. Eğer birim belirtilmezse, Bayt " "varsayılır. Değer ve birim arasında boşluk olmaması gerektiğine ve " "birimlerin küçük/büyük harf duyarlı olmasına dikkat edin." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "Bu işlemde %(shl)s seviyesi gösterme desteklenmez" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Bazı kaynak türleri her sunucu başına birden fazla anahtar / değer çiftine " "izin verir. Örneğin, Cinder mantıksal sürücü üzerinde kullanıcı ve imaj " "metadatalarına izin verir. Sadece imaj özellikleri metadataları Nova ile " "değerlendirilir (zamanlama ya da sürücüler). Bu özellik belirsizliği " "kaldırmak için bir ad alanı hedefine olanak sağlar." msgid "Sort direction supplied was not valid." msgstr "Sağlanan sıralama yönü geçersizdir." msgid "Sort key supplied was not valid." msgstr "Sağlanan sıralama anahtarı geçersizdir." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Verilen kaynak türü için kullanılacak öneki belirtir. Ad alanındaki her " "özellik belirtilen kaynak türüne uygulanırken önek eklenmelidir. Önek " "ayıracı içermelidir (örneğin; :)." msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "Görev betiklerini çalıştırmak için kullanılacak görev yürütücüsünü belirtir." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
msgstr "Durum \"bekliyor\", \"kabul edildi\" ya da \"reddedildi\" olmalıdır." msgid "Status not specified" msgstr "Durum belirtilmemiş" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s mevcut durumundan %(new_status)s yeni duruma geçişe izin " "verilmez" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "(%(sig)s) sinyali ile %(serv)s (pid %(pid)s) durduruluyor" #, python-format msgid "Store for image_id not found: %s" msgstr "image_id için depo bulunamadı: %s" #, python-format msgid "Store for scheme %s not found" msgstr "%s şeması için depo bulunamadı" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "Verilen %(attr)s (%(supplied)s) ve yüklenen imajdan (%(actual)s) oluşturulan " "%(attr)s uyuşmadı. İmaj durumu 'killed' olarak ayarlanıyor." msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' imaj özniteliği için desteklenen değerler" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' imaj özniteliği için desteklenen değerler" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s olarak yeniden oluşturulması durdurulan, %(rsn)s idi." msgid "System SIGHUP signal received." msgstr "Sistem SIGHUP sinyali aldı." #, python-format msgid "Task '%s' is required" msgstr "'%s' görevi gereklidir" msgid "Task does not exist" msgstr "Görev mevcut değil" msgid "Task failed due to Internal Error" msgstr "Görev Dahili Hata nedeniyle başarısız oldu" msgid "Task was not configured properly" msgstr "Görev düzgün bir şekilde yapılandırılmadı." #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Verilen %(task_id)s ile görev bulunamadı" msgid "The \"changes-since\" filter is no longer available on v2." 
msgstr "" "\"belli bir zamandan sonraki değişiklikler\" süzgeci v2 sürümünde artık " "mevcut değil." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Belirtilen %s CA dosyası mevcut değil" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "%(task_id)s görevi ile oluşturulan %(image_id)s imaj nesnesi, artık ileri " "işlem için geçerli durumda değildir." msgid "The Store URI was malformed." msgstr "Depo URI'si bozulmuş." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "Keystone hizmeti için URL. Eğer \"use_user_token\" yürürlükte değilse ve " "keystone kimlik doğrulaması kullanılıyorsa, o zaman keystone URL'i " "belirtilebilir." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Swift kimlik doğrulama servisinin dinlediği adres.(önerilmiyor)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Yönetici parolası. Eğer \"use_user_token\" yürürlükte değilse, o zaman " "yönetici kimlik bilgileri belirtilebilir." msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Yönetici kullanıcı adı. Eğer \"use_user_token\" yürürlükte değilse, o zaman " "yönetici kimlik bilgileri belirtilebilir." msgid "The amount of time in seconds to delay before performing a delete." msgstr "Bir silme işlemi gerçekleşmeden önce saniye cinsinden gecikme miktarı." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "TCP dinleyici soket oluştururken kullanılacak hizmet bekleyen değeri." 
#, python-format msgid "The cert file you specified %s does not exist" msgstr "Belirtilen %s sertifika dosyası mevcut değil" msgid "The config file that has the swift account(s)configs." msgstr "Swift hesap(ları) yapılandırmalarına sahip yapılandırma dosyası." msgid "The current status of this task" msgstr "Görevin şu anki durumu" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "İmaj önbellek dizininin %(image_cache_dir)s yer aldığı aygıt xattr " "desteklemiyor. Önbellek dizini içeren aygıt için fstab düzenlemeniz ve uygun " "satıra user_xattr seçeneği eklemeniz gerekebilir." msgid "The driver to use for image cache management." msgstr "İmaj önbellek yönetimi için kullanılacak sürücü." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Verilen uri geçersizdir. Lütfen, desteklenen uri listesinden %(supported)s " "geçerli bir uri belirtin" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "Hata ayıklama bağlantıları için pydev süreç dinleme makine adı/IP" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "%s imajı zaten köle üzerinde mevcut, fakat kontrol ettiğimizde bulamadık. Bu " "da, köle sunucular üzerindeki bütün imajları görmeye iznimizin olmadığını " "gösterir." 
#, python-format msgid "The incoming image is too large: %s" msgstr "Gelen imaj çok büyük: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Belirttiğiniz %s anahtar dosyası mevcut değil" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj konumlarının sayı sınırı aşıldı.Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Bu imaj için izin verilen imaj üye sınırı aşıldı.Denenen: %(attempted)s, En " "fazla: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj özelliklerinin sayı sınırı aşıldı.Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "İmaj özelliklerinde izin verilen sınır aşıldı.Denenen: %(num)s, En fazla: " "%(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "İzin verilen imaj etiketlerinin sayı sınırı aşıldı.Denenen: %(attempted)s, " "Azami: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "%(location)s konumu zaten mevcut" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Konum verisi geçersiz bir kimliğe sahip: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. 
If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "Özellik koruma dosyasının konumu. Bu dosya özellik korumaları ve bunlarla " "ilişkilendirilmiş rol/ilkeler için kurallar içerir. Eğer yapılandırma değeri " "belirtilmemişse, öntanımlı olarak, özellik korumaları zorunlu olmayacaktır. " "Eğer bir değer belirtilirse ve dosya bulunamazsa, o zaman glance-api servisi " "başlamayacak." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Ad=%(record_name)s ile metadata tanımı %(record_type)s silinebilir değil. " "Diğer kayıtlar hala onu gösteriyor." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "Metadata tanım ad alanı=%(namespace_name)s zaten mevcut." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "Ad=%(object_name)s ile metadata tanım nesnesi ad alanında=%(namespace_name)s " "bulunamadı." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Ad=%(property_name)s ile metadata tanım özelliği ad alanında=" "%(namespace_name)s bulunamadı." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Ad alanına=%(namespace_name)s kaynak türünün=%(resource_type_name)s metadata " "tanım kaynak tür ilişkisi zaten mevcut." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "Kaynak türünün=%(resource_type_name)s ad alanında=%(namespace_name)s, " "metadata tanım kaynak-tür ilişkisi bulunamadı." 
#, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "Ad=%(resource_type_name)s ile metadata tanım kaynak-türü bulunamadı." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Ad=%(name)s ile metadata tanım etiketi ad alanında=%(namespace_name)s " "bulunamadı." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "Motorun çalıştırılacağı kip. 'serial' ya da 'parallel' olabilir." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "Servis istekleri için oluşturulacak alt süreç işçilerinin sayısı. Öntanımlı " "olarak kullanılabilir CPU sayısına eşit olacaktır." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "Motor tarafından aynı anda yürütülen paralel etkinlik sayısı. Bu değer motor " "kipi 'parallel' iken birden büyük olabilir." msgid "The parameters required by task, JSON blob" msgstr "JSON blob, görev tarafından istenen parameteler" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "Varsa, kayıt defteri sunucusuna SSL bağlantısında kullanılacak sertifika " "dosya yolu. Alternatif olarak, CA sertifika dosyasının dosya yolu için " "GLANCE_CLIENT_CERT_FILE ortam değişkeni ayarlayabilirsiniz" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." 
msgstr "" "Varsa, kayıt defteri sunucusuna SSL bağlantısında kullanılacak yetki " "onaylama sertifika dosyasının yolu. Alternatif olarak, CA sertifika " "dosyasının yolu için GLANCE_CLIENT_CA_FILE ortam değişkenini " "ayarlayabilirsiniz." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "Varsa, kayıt defteri sunucusuna SSL bağlantısında kullanılacak anahtar dosya " "yolu. Alternatif olarak, anahtar dosyasının dosya yolu için " "GLANCE_CLIENT_KEY_FILE ortam değişkeni ayarlayabilirsiniz" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "İmaj önbellek yönetimi için kullanılacak sqlite dosya veritabanı yolu." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "Bir kayıt defteri isteğinin tamamlanması için API sunucusunun bekleyeceği " "saniye cinsinden zaman aralığı. 0 değeri zaman aşımı olmadığını ifade eder." msgid "The port on which a pydev process is listening for connections." msgstr "Pydev sürecindeki bağlantı noktası bağlantılar için dinliyor." msgid "The port on which the server will listen." msgstr "Sunucu üzerindeki bağlantı noktası dinleyecek." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "Kayıt defteri sunucusu ile iletişim için kullanılacak protokol. Ya http ya " "da https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "Verilen şema altında: %(schema)s sağlanan %(body)s gövdesi geçersizdir" msgid "The provided image is too large." msgstr "Getirilen imaj çok büyük." 
#, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "Sağlanan yol '%(path)s' geçersizdir: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "Yeni imajlar eklemek için kullanılacak öntanımlı swift hesap/yedekleme " "deposu parametrelerine referans." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "Kimlik doğrulama servisi için bölge. Eğer \"use_user_token\" yürürlükte " "değilse ve keystone kimlik doğrulaması kullanılıyorsa, bölge adı " "belirtilebilir." msgid "The request returned 500 Internal Server Error." msgstr "İstek geri 500 İç Sunucu Hatası döndürdü." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "İstek 503 Hizmet Kullanılamıyor kodu döndürdü. Bu genellikle, hizmetin aşırı " "yük altında olduğu ya da geçici kesintiler oluştuğu anlamına gelir." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "İstek 302 Çok Seçenek kodu döndürdü. Bu genellikle, istek URI'sinin bir " "sürüm göstergesi içermediği anlamına gelir.\n" "\n" "Dönen yanıtın gövdesi:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "İstek 413 Girilen Veri Çok Büyük kodu döndürdü. 
Bu genellikle, hız " "sınırlayıcı ya da kota eşiği ihlali anlamına gelir.\n" "\n" "Yanıt gövdesi:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "İstek beklenmeyen bir durum döndürdü: %(status)s.\n" "\n" "Yanıt:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "İstenen imaj devrede değil. İmaj verisi indirmek yasak." msgid "The result of current task, JSON blob" msgstr "Şu anki görevin sonucu, JSON blob" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "%(image_size)s veri boyutu sınırı aşacak. Kalan bayt %(remaining)s " #, python-format msgid "The specified member %s could not be found" msgstr "Belirtilen üye %s bulunamadı" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Belirtilen metadata nesnesi %s bulunamadı" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Belirtilen metadata etiketi %s bulunamadı" #, python-format msgid "The specified namespace %s could not be found" msgstr "Belirtilen ad alanı %s bulunamadı" #, python-format msgid "The specified property %s could not be found" msgstr "Belirtilen özellik %s bulunamadı" #, python-format msgid "The specified resource type %s could not be found " msgstr "Belirtilen kaynak türü %s bulunamadı " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Silinen imaj konumunun durumu sadece 'pending_delete' ya da 'deleted' olarak " "ayarlanabilir" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Silinen imaj konum durumu sadece 'pending_delete' ya da 'deleted' olarak " "ayarlanabilir." 
msgid "The status of this image member" msgstr "Bu imaj üyesinin durumu" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Depo tercih sırasını almak için kullanılacak depo adları. Ad, 'stores' " "yapılandırma seçeneği tarafından tanımlanan depolardan biri olarak " "kaydedilmelidir. Bu seçenek, 'store_type' seçeneğini, 'location_strategy' " "yapılandırma seçeneği tarafından tanımlanan imaj konum stratejisi olarak " "kullanırken uygulanacaktır." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "Kimlik doğrulama için kullanılacak strateji. Eğer \"use_user_token\" " "yürürlükte değilse, o zaman kimlik doğrulama stratejisi belirtilebilir." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Hedef üye %(member_id)s, %(image_id)s imajı ile zaten ilişkilendirilmiştir." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "İdari kullaıcının kiracı adı. Eğer \"use_user_token\" yürürlükte değilse, o " "zaman yönetici kiracı adı belirtilebilir." msgid "The type of task represented by this content" msgstr "Bu içerik ile sunulan görev türü" msgid "The unique namespace text." msgstr "Eşsiz ad alanı metni." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Kullanıcı dostu ad alanı adı. Eğer mevcut ise, kullanıcı arayüzü tarafından " "kullanılır." 
msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "Swift kimlik doğrulama servisine dayalı doğrulanacak kullanıcı(önerilmiyor)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "TCP_KEEPIDLE soket seçeneği için değer. Bu TCP canlı mı araştırma istekleri " "göndermeye başlamadan önce bağlantının boşta olması gereken saniye cinsinden " "süredir." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "%(error_key_name)s %(error_filename)s ile ilgili bir sorun var. Lütfen " "doğrulayın. Hata: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "%(error_key_name)s %(error_filename)s ile ilgili bir sorun var. Lütfen " "doğrulayın. OpenSSL hatası: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Anahtar çiftiniz ile ilgili bir sorun var. Lütfen sertifika %(cert_file)s " "ve anahtarın %(key_file)s birbirine ait olduğunu doğrulayın. OpenSSL hatası " "%(ce)s" msgid "There was an error configuring the client." msgstr "İstemci yapılandırılırken bir hata meydana geldi." msgid "There was an error connecting to a server" msgstr "Sunucuya bağlanırken bir hata meydana geldi" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Yapılandırma değeri özellik koruma dosyasında \"roller\" ya da \"ilkeler\" " "kullanılıp kullanılmadığını gösterir." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." 
msgstr "" "Şu anda Glance Görevleri üzerinde bu işleme izin verilmiyor. Onlar " "expires_at özelliğine göre süreleri dolduktan sonra otomatik silinirler." msgid "This operation is currently not permitted on Glance images details." msgstr "Bu işleme şu anda Glance imaj ayrıntılarında izin verilmez." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Bu değer imaj konum sırası tespiti için kullanılacak stratejiyi ayarlar. Şu " "anda iki strateji Glance 'location_order' ve 'store_type' ile paketlenir." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Bir görevin başarılı ya da başarısız olarak sonuçlanmasından sonra saat " "olarak yaşayacağı süre" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "İstemci bağlantılarının soket işlemleri için zaman aşımı. Eğer gelen bir " "bağlantı bu süre boyunca boştaysa, kapatılacaktır. '0' değerinin anlamı " "sonsuza kadar beklemesidir." msgid "Too few arguments." msgstr "Çok az değişken." msgid "Too few locations" msgstr "Çok az konum" msgid "Too many locations" msgstr "Çok fazla konum" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "%(img_count)d imaj genelinde toplam boyut %(size)d bayt" msgid "Turn on/off delayed delete." msgstr "Gecikmiş silme aç/kapat." 
msgid "Type version has to be a valid semver string" msgstr "Tür sürümü geçerli bir semver karakter dizisi olmalıdır" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI bir şemanın birden fazla olayını içeremez. Eğer URI'yi swift://user:" "pass@http://authurl.com/v1/container/obj gibi belirttiyseniz, swift+http:// " "şemasını kullanmak için onu değiştirmeniz gerekir, şu şekilde: swift+http://" "user:pass@authurl.com/v1/container/obj" msgid "Unable to PUT to non-empty collection" msgstr "Boş olmayan koleksiyona PUT işlemi yapılamadı" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Pid dosyası %(pid)s oluşturulamadı. Root olmadan çalıştırılsın mı?\n" "Geçici bir dosyaya geri düşüyor, şu komutları kullanarak %(service)s " "servisini durdurabilirsiniz:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgid "Unable to filter on a range with a non-numeric value." msgstr "Sayısal olmayan değer ile bir aralıkta süzme yapılamadı." msgid "Unable to filter using the specified range." msgstr "Belirtilen aralık kullanılarak süzme yapılamadı." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON Şema değişikliğinde '%s' bulunamadı" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "JSON Şema değişikliğinde `op` bulunamadı. Şu seçeneklerden biri olmalıdır: " "%(available)s." msgid "Unable to get legacy image" msgstr "Eski imaj alınamadı" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "Dosya tanıtıcı sınır arttırılamadı. 
Root olmadan çalıştırılsın mı?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "%(conf_file)s yapılandırma dosyasından %(app_name)s uygulaması yüklenemedi.\n" "Alınan: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Şema yüklenemedi: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s için yapıştırma yapılandırma dosyası yerleştirilemedi." msgid "Unable to modify collection in immutable or readonly property" msgstr "Değişmez ya da salt okunur özellikteki koleksiyon değiştirilemedi" msgid "Unable to retrieve request id from context" msgstr "İçerikten istek bilgisi alınamadı" msgid "Unable to specify artifact type explicitly" msgstr "Ürün türü açıkça belirtilemedi" msgid "Unable to specify artifact type version explicitly" msgstr "Ürün türünün sürümü açıkça belirtilemedi" msgid "Unable to specify version if multiple types are possible" msgstr "Birden fazla tür olduğundan sürüm belirtilemedi" msgid "Unable to specify version if type is not specified" msgstr "Eğer türü belirtilmediyse, sürümü belirlenemez" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "%(image_id)s imajı için çift imaj verisi yüklenemedi: %(error)s" msgid "Unauthorized image access" msgstr "Yetkisiz imaj erişimi" #, python-format msgid "Unexpected response: %s" msgstr "Beklenmeyen yanıt: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Bilinmeyen kimlik doğrulama stratejisi '%s'" #, python-format msgid "Unknown command: %s" msgstr "Bilinmeyen komut: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Bilinmeyen sıralama yönü, 'desc' or 'asc' olmalıdır" msgid "Unrecognized JSON Schema draft version" msgstr "Tanınmayan JSON Şeması taslak sürümü" msgid "Unrecognized changes-since value" msgstr "Belli bir zamandan sonraki tanınmayan değişiklik değeri" #, python-format msgid 
"Unsupported sort_dir. Acceptable values: %s" msgstr "Desteklenmeyen sort_dir. Kabul edilen değerler: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "Desteklenmeyen sort_key. Kabul edilen değerler: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "%(value)d değeri aralık dışında, %(max)d değerini geçmemelidir" msgid "Value is greater than maximum" msgstr "Değer azami değerden daha büyüktür" msgid "Value is less than minimum" msgstr "Değer asgari değerden azdır" msgid "Value is required" msgstr "Değer gereklidir" #, python-format msgid "Version component is too large (%d max)" msgstr "Sürüm bileşeni çok büyük (en fazla %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "Sürüm geçersizdir: %(reason)s" msgid "Visibility must be either \"public\" or \"private\"" msgstr "Görünürlük ya \"ortak\" ya da \"özel\" olmalıdır" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "%(pid)s (%(file)s) pid'i öldürmek için 15 saniye beklendi; vazgeçiliyor" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "Yanlışken, hiçbir ürün available_plugins dikkate alınmadan yüklenemez. " "Doğruyken, ürünler yüklenebilir." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Sunucu SSL kipte çalışırken, cert_file ve key_file değerlerinin ikisinide " "yapılandırma dosyanızda belirtmelisiniz" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Seçildiğinde, bu seçenek bir imajın sahibini kiracı olacak şekilde ayarlar." "Diğer bir deyişle, imajın sahibi, isteği bildiren ve kimliği doğrulanmış " "kişi olacaktır." 
msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "Kayıt defteri sunucusuna bağlantı sırasında SSL kullanılırsa, bir onaylama " "otoritesi yoluyla doğrulama gerekmez. API için glanceclient kullanarak komut " "satırında --insecure belirtilmesi, kayıt defterine eşdeğerdir." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "İmaj şema sağlayıcılarının haricinde imaj özelliklerinin belirtilmesinde " "kullanıcılara izin verilip verilmemesi" msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "İmaj özelliklerinin, art alanda çalışan imaj depolama konumunu içerip " "içermemesi. Depolama konumunu açığa çıkarmak güvenlik tehlikesi " "oluşturabilir, bu yüzden bu ayarı dikkatli kullanın!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Kayıt defteri sunucusuna istek yaparken kiracı bilgisi ve kullanıcı içeren " "başlığın geçirilip geçirilmemesi. Bu kayıt defteri sunucusunun " "keystonemiddleware auth_token katmanı olmadan içerik katmanını kullanmasına, " "keystone kimlik doğrulama servisine çağrıların kaldırılmasına izin verir. Bu " "seçenek kullanılırken, glance-api ve glance kayıt defteri arasında " "auth_token katmanı dışında başka bir yol ile güvenli iletişim sağlanır." 
msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Kayıt defteri sunucusuna istek yaparken kullanıcı jetonunun geçirilip " "geçirilmemesi. Büyük dosyaların yüklenmesi sırasında jetonun süresinin sona " "ermesi ile oluşacak hataları engellemek için, bu parametrenin seçilmemiş " "olarak ayarlanması önerilir. Eğer \"use_user_token\" yürürlükte değilse, o " "zaman yönetici kimlik bilgileri belirtilebilir." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Eşzamansız görev işlemleri için çalışma dizini. Burada ayarlanan dizin, " "imajları işlemek için kullanılacaktır - normalde hedef depoya aktarılmadan " "önce. Çalışma dizini sağlanırken, eşzamanlı görevleri verimli bir şekilde " "çalıştırmak için yeterli alan sağladığınıza emin olun. 
Kabaca bir tahmin " "`max_workers` sayısı - ya da N çalışan işçi - ortalama imaj boyutu ile " "çarpılarak yapılabilir (örneğin 500MB). İmaj boyutu tahmini, dağıtımınızdaki " "ortalama boyuta göre yapılmalıdır. Görevin çalışmasına bağlı olarak, göreve " "bağlı bazı katsayılar ile bu sayıyı çarpmanızın gerekebileceğini unutmayın. " "Örnek olarak, eğer imaj dönüştürme etkinse, kullanılabilir boyutun iki " "katını isteyebilirsiniz. Bu söylenenlerin tümünün sadece tahmin olduğunu " "unutmamalı ve tahminleri en kötü senaryoya göre yapmalısınız ve hatalı " "durumlar için hazır olmalısınız." #, python-format msgid "Wrong command structure: %s" msgstr "Hatalı komut yapısı: %s" msgid "You are not authenticated." msgstr "Kimliğiniz doğrulanamadı." msgid "You are not authorized to complete this action." msgstr "Bu eylemi tamamlamak için yetkili değilsiniz." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "'%s''ye ait ad alanında bir etiket oluşturma izniniz yok" msgid "You are not permitted to create image members for the image." msgstr "İmaj için üye oluşturma izniniz yok." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "'%s''ye ait imaj oluşturma izniniz yok." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "'%s''ye ait ad alanı oluşturma izniniz yok" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "'%s''ye ait nesne oluşturma izniniz yok" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "'%s''ye ait özellik oluşturma izniniz yok." #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "'%s''ye ait resource_type oluşturma izniniz yok." #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "Sahibi olarak bu görevi oluşturma izniniz yok: %s" msgid "You are not permitted to delete this image." 
msgstr "Bu imajı silme izniniz yok." msgid "You are not permitted to delete this meta_resource_type." msgstr "meta_resource_type silme izniniz yok." msgid "You are not permitted to delete this namespace." msgstr "Bu ad alanını silme izniniz yok." msgid "You are not permitted to delete this object." msgstr "Bu nesneyi silme izniniz yok." msgid "You are not permitted to delete this property." msgstr "Bu özelliği silme izniniz yok." msgid "You are not permitted to delete this tag." msgstr "Bu etiketi silme izniniz yok." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "Bu %(resource)s üzerinde '%(attr)s' değiştirme izniniz yok." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Bu imajda '%s' değiştirme izniniz yok." msgid "You are not permitted to modify locations for this image." msgstr "Bu imajın konumunu değiştirme izniniz yok." msgid "You are not permitted to modify tags on this image." msgstr "Bu imaj üzerindeki etiketleri değiştirme izniniz yok." msgid "You are not permitted to modify this image." msgstr "Bu imajı değiştirme izniniz yok." msgid "You are not permitted to set status on this task." msgstr "Bu görev üzerinde durum ayarlama izniniz yok." msgid "You are not permitted to update this namespace." msgstr "Bu ad alanını güncelleme izniniz yok." msgid "You are not permitted to update this object." msgstr "Bu nesneyi güncelleme izniniz yok." msgid "You are not permitted to update this property." msgstr "Bu özelliği güncelleme izniniz yok." msgid "You are not permitted to update this tag." msgstr "Bu etiketi güncelleme izniniz yok." msgid "You are not permitted to upload data for this image." msgstr "Bu imaj için veri yükleme izniniz yok." 
#, python-format msgid "You cannot add image member for %s" msgstr "%s için imaj üyesi ekleyemiyorsunuz" #, python-format msgid "You cannot delete image member for %s" msgstr "%s için imaj üyesini silemiyorsunuz" #, python-format msgid "You cannot get image member for %s" msgstr "%s için imaj üyesini alamıyorsunuz" #, python-format msgid "You cannot update image member %s" msgstr "%s imaj üyesini güncelleyemiyorsunuz" msgid "You do not own this image" msgstr "Bu imajın sahibi değilsiniz" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Bağlanırken SSL kullanmayı seçtiniz ve bir sertifika sağladınız, ancak ya " "key_file parametresi sağlamayı ya da GLANCE_CLIENT_KEY_FILE değişkeni " "ayarlama işlemini başaramadınız." msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Bağlanırken SSL kullanmayı seçtiniz ve bir anahtar sağladınız, ancak ya " "cert_file parametresi sağlamayı ya da GLANCE_CLIENT_CERT_FILE değişkeni " "ayarlama işlemini başaramadınız." 
msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() beklenmeyen anahtar sözcük değişkeni '%s' aldı" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "güncellemede (istenen from_state=%(from)s), %(current)s mevcut durumundan " "%(next)s sonrakine geçiş olamaz " #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "özel özellikler (%(props)s) temel özellikler ile çatışır" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "bu platformda eventlet 'poll' ya da 'selects' havuzları kullanılabilirdir" msgid "is_public must be None, True, or False" msgstr "is_public Hiçbiri, Doğru ya da Yanlış olmalıdır" msgid "limit param must be an integer" msgstr "Sınır parametresi tam sayı olmak zorunda" msgid "limit param must be positive" msgstr "Sınır parametresi pozitif olmak zorunda" #, python-format msgid "location: %s data lost" msgstr "konum: %s verisi kayıp" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() beklenmeyen anahtar sözcük %s aldı" msgid "protected must be True, or False" msgstr "korumalı Doğru ya da Yanlış olmalıdır" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s başlatılamadı. 
Alınan hata: %(e)s" glance-12.0.0/glance/locale/zh_CN/0000775000567000056710000000000012701407204017652 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000012701407204021437 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/zh_CN/LC_MESSAGES/glance.po0000664000567000056710000031322612701407047023244 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # blkart , 2015 # Dongliang Yu , 2013 # Kecheng Bi , 2014 # Tom Fifield , 2013 # 颜海峰 , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Linda , 2016. #zanata # Lucas Palm , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev41\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 00:22+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-11 03:05+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s 必须是字符串" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s 是必需属性" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s 不能长于 %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s 不能短于 %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s 应当与模式 %(pattern)s 相匹配" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "最后一个 RPC 调用中发生 %(cls)s 异常:%(val)s" #, python-format msgid "%(m_id)s not found in the member list 
of the image %(i_id)s." msgstr "在映像 %(i_id)s 的成员列表中找不到 %(m_id)s。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) 正在运行..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s 似乎已在运行:%(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "已两次将 %(strategy)s 注册为模块。未在使用 %(module)s。" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "%(task_id)s(类型为 %(task_type)s)未正确配置。未能装入文件系统存储器" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s(类型为 %(task_type)s)未正确配置。缺少工作目录:%(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "正在%(verb)s %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "正在%(verb)s %(serv)s(借助 %(conf)s)" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 请指定 host:port 对,其中 host 是 IPv4 地址、IPv6 地址、主机名或 FQDN。如" "果使用 IPv6 地址,请将其括在方括号中并与端口隔开(即,“[fe80::a:b:" "c]:9876”)。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 不能包含 4 字节 Unicode 字符。" #, python-format msgid "%s is already stopped" msgstr "%s 已停止" #, python-format msgid "%s is stopped" msgstr "%s 已停止" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "“%(param)s”值超出范围,不得超过 %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "当启用了 keystone 认证策略时,需要 --os_auth_url 选项或 OS_AUTH_URL 环境变" "量\n" msgid "A body is not expected with this request." 
msgstr "此请求不应有主体。" msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "允许采用 name 或 name-version 格式的工件的列表。空列表意味着可以装入任何工" "件。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,已存在名称为 %(object_name)s 的元数据定义对" "象。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,已存在名称为 %(property_name)s 的元数据定义" "属性。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "已存在名称为 %(resource_type_name)s 的元数据定义资源类型。" #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "名称空间 %(namespace_name)s 中已存在名称为 %(name)s 的元数据标记。" msgid "A set of URLs to access the image file kept in external store" msgstr "用于访问外部存储器中保留的映像文件的 URL集合" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "用于加密存储“位置”元数据的 AES 密钥。如果已使用,那么这包含 Swift 或 S3 凭" "证。应该设置为随机字符串,长度为 16、24 或 32 个字节" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "用于绑定服务器的地址。当选择特定网络接口时,此项很有用。" msgid "Address to find the registry server." msgstr "用于查找注册表服务器的地址。" msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "当使用上下文中间件ContextMiddleware的时候将允许未授权的用户拥有API接口的只读" "权限" #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "允许值 %s 在所给定的验证程序下无效" msgid "Amount of disk space (in GB) required to boot image." 
msgstr "引导映像所需的磁盘空间量(以 GB 计)。" msgid "Amount of ram (in MB) required to boot image." msgstr "引导映像所需的 ram 量(以 MB 计)。" msgid "An identifier for the image" msgstr "映像的标识" msgid "An identifier for the image member (tenantId)" msgstr "映像成员的标识 (tenantId)" msgid "An identifier for the owner of this task" msgstr "此任务的所有者的标识" msgid "An identifier for the task" msgstr "任务的标识" msgid "An image file url" msgstr "映像文件的 URL" msgid "An image schema url" msgstr "映像模式的 URL" msgid "An image self url" msgstr "映像本身的 URL" #, python-format msgid "An image with identifier %s already exists" msgstr "具有标识 %s 的映像已存在" msgid "An import task exception occurred" msgstr "发生了导入任务异常。" msgid "An object with the same identifier already exists." msgstr "具有同一标识的对象已存在。" msgid "An object with the same identifier is currently being operated on." msgstr "当前正在对具有同一标识的对象进行操作。" msgid "An object with the specified identifier was not found." msgstr "找不到具有指定标识的对象。" msgid "An unknown exception occurred" msgstr "发生未知异常" msgid "An unknown task exception occurred" msgstr "发生未知任务异常" #, python-format msgid "Array has no element at position %d" msgstr "在阵列中的 %d 位置没有任何元素" msgid "Array property can't have item_type=Array" msgstr "阵列属性不得具有 item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "未能删除工件 %s,因为正在使用该工件:%s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "无法将工件状态从 %(source)s 更改为 %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "工件超过存储配额:%s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "工件没有属性 %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "工件状态无法从 %(curr)s 更改为 %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "工件存储介质已满:%s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "名称为“%(name)s”且版本为“%(version)s”的工件类型未知" msgid "Artifact 
with a circular dependency can not be created" msgstr "无法创建具有循环依赖性的工件" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "不可访问标识为 %(id)s 的工件" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "找不到标识为 %(id)s 的工件" msgid "Artifact with the specified type, name and version already exists" msgstr "已存在具有所指定类型、名称和版本的工件" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "具有所指定类型、名称和版本的工件已经具有直接依赖关系 %(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "具有所指定类型、名称和版本的工件已经具有传递依赖关系 %(dep)s" msgid "Attempt to set readonly property" msgstr "尝试设置只读属性" msgid "Attempt to set value of immutable property" msgstr "尝试设置不可变属性的值" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "请尝试上载重复映像:%s" msgid "Attempted to update Location field for an image not in queued status." msgstr "已尝试更新处于未排队状态的映像的“位置”字段。" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "属性“%(property)s”是只读的。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "属性“%(property)s”已保留。" #, python-format msgid "Attribute '%s' is read-only." msgstr "属性“%s”是只读的。" #, python-format msgid "Attribute '%s' is reserved." msgstr "属性“%s”已保留。" msgid "Attribute container_format can be only replaced for a queued image." msgstr "只能为已排队的映像替换属性 container_format。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "只能为已排队的映像替换属性 disk_format。" msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "针对 Swift 认证服务认证的用户的认证密钥。(不推荐)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "找不到 URL %(url)s 处的授权服务。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." 
msgstr "认证错误 - 文件上传期间此令牌可能已到期。正在删除 %s 的映像数据。" msgid "Authorization failed." msgstr "授权失败。" msgid "Available categories:" msgstr "可用的类别:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "无效“%s”查询过滤器格式。请使用 ISO 8601 日期时间注释。" #, python-format msgid "Bad Command: %s" msgstr "命令 %s 不正确" #, python-format msgid "Bad header: %(header_name)s" msgstr "头 %(header_name)s 不正确" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "传递至过滤器 %(filter)s 的值不正确,已获取 %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "S3 URI %(uri)s 的格式不正确" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Swift URI 中凭证“%(creds)s”的格式不正确" msgid "Badly formed credentials in Swift URI." msgstr "Swift URI 中凭证的格式不正确。" msgid "Base directory that the image cache uses." msgstr "映像高速缓存使用的基本目录。" msgid "BinaryObject property cannot be declared mutable" msgstr "不能将 BinaryObject 属性声明为可变" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "BLOB %(name)s 不能具有多个值。" msgid "Blob size is not set" msgstr "未设置 BLOB 大小" msgid "Body expected in request." msgstr "请求中需要主体。" msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "不能同时指定 file 和 legacy_image_id" msgid "CA certificate file to use to verify connecting clients." msgstr "要用于验证连接客户机的 CA 证书文件。" msgid "Cannot be a negative value" msgstr "不能为负值" msgid "Cannot be a negative value." msgstr "不得为负值。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "无法将映像 %(key)s“%(value)s”转换为整数。" msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "不得使用保留名称“metadata”来声明工件属性" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "无法装入工件“%(name)s”" msgid "Cannot remove last location in the image." 
msgstr "不能移除映像中的最后一个位置。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "无法为镜像%(image_id)s保存数据: %(error)s" msgid "Cannot set locations to empty list." msgstr "不能将位置设置为空列表。" msgid "Cannot specify 'max_size' explicitly" msgstr "无法显式指定“max_size”" msgid "Cannot specify 'min_size' explicitly" msgstr "无法显式指定“min_size”" msgid "Cannot upload to an unqueued image" msgstr "无法上载至未排队的映像" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "无法将此参数与运算符 %(op)s 配合使用" msgid "Certificate file to use when starting API server securely." msgstr "安全启动 API 服务器时要使用的证书文件。" #, python-format msgid "Certificate format not supported: %s" msgstr "证书格式不受支持:%s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "证书在以下时间之后无效:%s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "证书在以下时间之前无效:%s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "校验和验证失败。已异常中止映像“%s”的高速缓存。" msgid "Client disconnected before sending all data to backend" msgstr "客户端在发送所有数据到后端时断开了连接" msgid "Command not found" msgstr "找不到命令" msgid "Configuration option was not valid" msgstr "配置选项无效" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "发生连接错误,或者对 URL %(url)s 处的授权服务的请求不正确。" #, python-format msgid "Constructed URL: %s" msgstr "已构造 URL:%s" msgid "Container format is not specified." msgstr "未指定容器格式。" msgid "Content-Type must be application/octet-stream" msgstr "Content-Type 必须是 application/octet-stream" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "对于映像 %(image_id)s,映像下载已损坏" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "在尝试时间达到 30 秒之后未能绑定至 %(host)s:%(port)s" msgid "Could not find OVF file in OVA archive file." 
msgstr "在 OVA 归档文件中找不到 OVF 文件。" #, python-format msgid "Could not find metadata object %s" msgstr "找不到元数据对象 %s" #, python-format msgid "Could not find metadata tag %s" msgstr "找不到元数据标记 %s" #, python-format msgid "Could not find namespace %s" msgstr "找不到名称空间 %s" #, python-format msgid "Could not find property %s" msgstr "找不到属性 %s" msgid "Could not find required configuration option" msgstr "找不到必需的配置选项" #, python-format msgid "Could not find task %s" msgstr "找不到任务 %s" #, python-format msgid "Could not update image: %s" msgstr "未能更新映像:%s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "当前包含多个磁盘的 OVA 包不受支持。" msgid "Custom validators list should contain tuples '(function, message)'" msgstr "定制验证程序列表应当包含元组“(function, message)”" #, python-format msgid "Data for image_id not found: %s" msgstr "找不到 image_id 的数据:%s" msgid "Data supplied was not valid." msgstr "提供的数据无效。" msgid "Date and time of image member creation" msgstr "创建映像成员的日期和时间" msgid "Date and time of image registration" msgstr "注册映像的日期和时间" msgid "Date and time of last modification of image member" msgstr "最近一次修改映像成员的日期和时间" msgid "Date and time of namespace creation" msgstr "创建名称空间的日期和时间" msgid "Date and time of object creation" msgstr "创建对象的日期和时间" msgid "Date and time of resource type association" msgstr "关联资源类型的日期和时间" msgid "Date and time of tag creation" msgstr "创建标记的日期和时间" msgid "Date and time of the last image modification" msgstr "最近一次修改映像的日期和时间" msgid "Date and time of the last namespace modification" msgstr "最近一次修改名称空间的日期和时间" msgid "Date and time of the last object modification" msgstr "最近一次修改对象的日期和时间" msgid "Date and time of the last resource type association modification" msgstr "最近一次修改资源类型关联的日期和时间" msgid "Date and time of the last tag modification" msgstr "最近一次修改标记的日期和时间" msgid "Datetime when this resource was created" msgstr "此资源的创建日期时间" msgid "Datetime when this resource was updated" msgstr "此资源的更新日期时间" msgid "Datetime when this resource would be subject to removal" 
msgstr "将会移除此资源的日期时间" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "请求所返回的项数的缺省值(如果未在该请求中显式指定)" msgid "Default value is invalid" msgstr "缺省值无效" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "正在拒绝尝试上载工件,因为它超过了配额:%s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "正在拒绝尝试上载映像,因为它超过配额:%s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "正在拒绝尝试上载大小超过 %d 字节的映像。" #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "必须首先删除依赖关系属性“%s”" msgid "Dependency relations cannot be mutable" msgstr "依赖关系不得可变" msgid "Deploy the v1 OpenStack Images API." msgstr "请部署 OpenStack Images API V1。" msgid "Deploy the v1 OpenStack Registry API." msgstr "请部署 OpenStack Registry API V1。" msgid "Deploy the v2 OpenStack Images API." msgstr "请部署 OpenStack Images API V2。" msgid "Deploy the v2 OpenStack Registry API." msgstr "请部署 OpenStack Registry API V2。" msgid "Descriptive name for the image" msgstr "映像的描述性名称" msgid "Dictionary contains unexpected key(s)" msgstr "字典包含意外的关键字" msgid "Dictionary size is greater than maximum" msgstr "字典大小大于最大值" msgid "Dictionary size is less than minimum" msgstr "字典大小小于最小值" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "将对数字签名使用的摘要算法。使用“openssl list-message-digest-algorithms”命令" "以获取该平台上的 OpenSSL 版本支持的算法。例如,“sha1”、“sha256”、“sha512”等" "等。" msgid "Disk format is not specified." msgstr "未指定磁盘格式。" msgid "Does not match pattern" msgstr "与模式不匹配" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. 
Reason: %(reason)s" msgstr "未能正确配置驱动程序 %(driver_name)s。原因:%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "必须指定 file 或者 legacy_image_id" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "对请求解码时出错。Glance 无法对 URL 或请求主体包含的字符进行解码。" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "访存映像 %(image_id)s 的成员时出错:%(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "存储配置中出错。已禁用将工件添加至存储器。" msgid "Error in store configuration. Adding images to store is disabled." msgstr "存储配置中出错。已禁止将映像添加至存储器。" msgid "Error occurred while creating the verifier" msgstr "创建验证程序时发生了错误" msgid "Error occurred while verifying the signature" msgstr "验证签名时发生了错误。" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "成员应为以下格式:{\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "状态应为以下格式:{\"status\": \"status\"}" msgid "External source should not be empty" msgstr "外部源不应为空。" #, python-format msgid "External sources are not supported: '%s'" msgstr "外部源不受支持:“%s”" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "未能激活映像。发生错误:%s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "未能添加映像元数据。发生错误:%s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "找不到要删除的工件 %(artifact_id)s" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "未能找到要删除的映像 %(image_id)s" #, python-format msgid "Failed to find image to delete: %s" msgstr "未能找到要删除的映像:%s" #, python-format msgid "Failed to find image to update: %s" msgstr "找不到要更新的映像:%s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "找不到要删除的资源类型 %(resourcetype)s" #, python-format msgid "Failed to initialize the image cache database. 
Got error: %s" msgstr "未能初始化映像高速缓存数据库。发生错误:%s" #, python-format msgid "Failed to read %s from config" msgstr "未能从配置读取 %s" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "未能保留映像。发生错误:%s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "未能更新映像元数据。发生错误:%s" #, python-format msgid "Failed to upload image %s" msgstr "上传镜像 %s失败" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "由于 HTTP 错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "由于内部错误,未能上载映像 %(image_id)s 的映像数据:%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "文件 %(path)s 具有无效支持文件 %(bfile)s,正在异常中止。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "不允许基于文件的导入。请使用映像数据的非本地源。" msgid "File too large" msgstr "文件太大" msgid "File too small" msgstr "文件太小" msgid "Forbidden image access" msgstr "禁止访问映像" #, python-format msgid "Forbidden to delete a %s image." msgstr "已禁止对映像%s进行删除。" #, python-format msgid "Forbidden to delete image: %s" msgstr "已禁止删除映像:%s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "禁止修改 %(status)s 映像的“%(key)s”" #, python-format msgid "Forbidden to modify '%s' of image." msgstr "已禁止修改映像的“%s”。" msgid "Forbidden to reserve image." msgstr "已禁止保留映像。" msgid "Forbidden to update deleted image." msgstr "已禁止更新删除的映像。" #, python-format msgid "Forbidden to update image: %s" msgstr "已禁止更新映像:%s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "已禁止进行上载尝试:%s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." 
msgstr "正在禁止请求,元数据定义名称空间 %s 不可视。" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "正在禁止请求,任务 %s 不可视" msgid "Format of the container" msgstr "容器的格式" msgid "Format of the disk" msgstr "磁盘格式" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "获取 BLOB %(name)s 数据失败:%(err)s。" #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "获取镜像%(id)s数据失败: %(err)s。" msgid "Glance client not installed" msgstr "未安装 Glance 客户端" #, python-format msgid "Host \"%s\" is not valid." msgstr "主机“%s”无效。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "主机和端口“%s”无效。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "人工可读的信息性消息,仅在适当时(通常在发生故障时)才包括" msgid "If False doesn't trace SQL requests." msgstr "如果为 false,那么不要跟踪 SQL 请求。" msgid "If False fully disable profiling feature." msgstr "如果为 false,那么完全禁用概要分析功能。" msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "如果为 False,那么服务器将返回头“Connection: close”,如果为 True,那么服务器" "将在其响应中返回“Connection: Keep-Alive”。为了在响应已发送并且由客户机成功读" "取之后显式关闭客户机套接字连接,那么当创建 wsgi 服务器时,仅必须将此选项设置" "为 False。" msgid "If true, image will not be deletable." msgstr "如果为 true,那么映像将不可删除。" msgid "If true, namespace will not be deletable." msgstr "如果为 true,那么名称空间将不可删除。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "映像 %(id)s 未能删除,因为它正在使用中:%(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "找不到映像 %(id)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. 
The image may have been " "deleted during the upload: %(error)s" msgstr "镜像%(image_id)s上传后无法找到。镜像在上传过程中可能被删除: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "映像 %(image_id)s 受保护,无法删除。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "在上载之后,找不到映像 %s。可能已在上载期间删除该映像,正在清除已上载的区块。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "上传后找不到映像 %s。此映像可能已在上传期间删除。" #, python-format msgid "Image %s is deactivated" msgstr "映像 %s 已取消激活" #, python-format msgid "Image %s is not active" msgstr "映像 %s 处于不活动状态" #, python-format msgid "Image %s not found." msgstr "找不到映像 %s " #, python-format msgid "Image exceeds the storage quota: %s" msgstr "镜像超出存储限额: %s" msgid "Image id is required." msgstr "需要映像标识。" msgid "Image is protected" msgstr "映像受保护" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "对于映像 %(id)s,超过映像成员限制:%(e)s:" #, python-format msgid "Image name too long: %d" msgstr "映像名称太长:%d" msgid "Image operation conflicts" msgstr "映像操作发生冲突" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不允许映像状态从 %(cur_status)s 转变为 %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "映像存储介质已满:%s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "对于映像 %(id)s,超过映像标记限制:%(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "发生映像上载问题:%s" #, python-format msgid "Image with identifier %s already exists!" msgstr "具有标识 %s 的映像已存在!" #, python-format msgid "Image with identifier %s has been deleted." 
msgstr "已删除具有标识 %s 的映像。" #, python-format msgid "Image with identifier %s not found" msgstr "找不到具有标识 %s 的映像" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "找不到具有所给定标识 %(image_id)s 的映像" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "授权策略不正确,期望的是“%(expected)s”,但接收到的是“%(received)s”" #, python-format msgid "Incorrect request: %s" msgstr "以下请求不正确:%s" msgid "Index is out of range" msgstr "索引超出范围" msgid "Index is required" msgstr "需要索引" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "输入没有包含“%(key)s”字段" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "对工件存储介质没有足够的权限:%s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "对映像存储介质的许可权不足:%s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "无效 Content-Type,无法与 %s 配合使用" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "这个资源无效的JSON指针: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "无效证书格式:%s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "校验和“%s”无效:不得超过 32 个字符" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 配置文件中的配置无效。" msgid "Invalid configuration in property protection file." msgstr "属性保护文件中的配置无效。" #, python-format msgid "Invalid container format '%s' for image." msgstr "对于映像,容器格式“%s”无效。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "内容类型 %(content_type)s 无效" msgid "Invalid dict property type" msgstr "无效的字典属性类型" msgid "Invalid dict property type specification" msgstr "指定了无效的字典属性类型" #, python-format msgid "Invalid disk format '%s' for image." msgstr "对于映像,磁盘格式“%s”无效。" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "无效过滤器值 %s。缺少右引号。" #, python-format msgid "" "Invalid filter value %s. 
There is no comma after closing quotation mark." msgstr "无效过滤器值 %s。右引号之后没有逗号。" #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "无效过滤器值 %s。左引号之前没有逗号。" #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "无效头“Content-Type”:%s" msgid "Invalid image id format" msgstr "映像标识格式无效" msgid "Invalid item type specification" msgstr "指定了无效的项类型" #, python-format msgid "Invalid json body: %s" msgstr "无效 JSON 主体:%s" msgid "Invalid jsonpatch request" msgstr "jsonpatch 请求无效" msgid "Invalid location" msgstr "无效的位置" #, python-format msgid "Invalid location %s" msgstr "位置 %s 无效" #, python-format msgid "Invalid location: %s" msgstr "以下位置无效:%s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "location_strategy 选项 %(name)s 无效。有效策略选项如下:%(strategies)s" msgid "Invalid locations" msgstr "无效的位置" #, python-format msgid "Invalid locations: %s" msgstr "无效的位置:%s" msgid "Invalid marker format" msgstr "标记符格式无效" msgid "Invalid marker. Image could not be found." msgstr "标记符无效。找不到映像。" #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "无效 mask_gen_algorithm:%s" #, python-format msgid "Invalid membership association: %s" msgstr "成员资格关联无效:%s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "磁盘格式与容器格式的混合无效。将磁盘格式或容器格式设置" "为“aki”、“ari”或“ami”时,容器格式与磁盘格式必须匹配。" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "操作“%(op)s”无效。它必须是下列其中一项:%(available)s。" msgid "Invalid position for adding a location." msgstr "用于添加位置 (location) 的位置 (position) 无效。" msgid "Invalid position for removing a location." 
msgstr "用于移除位置 (location) 的位置 (position) 无效。" msgid "Invalid property definition" msgstr "属性定义无效" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "无效 pss_salt_length:%s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "签名密钥类型的无效公钥类型:%s" msgid "Invalid reference list specification" msgstr "指定的引用列表无效" msgid "Invalid referenced type" msgstr "所引用的类型无效" msgid "Invalid request PATCH for work with blob" msgstr "无效请求 PATCH,无法与 blob 配合使用" msgid "Invalid service catalog json." msgstr "服务目录 json 无效。" #, python-format msgid "Invalid signature hash method: %s" msgstr "无效签名散列方法:%s" #, python-format msgid "Invalid signature key type: %s" msgstr "无效签名密钥类型:%s" #, python-format msgid "Invalid sort direction: %s" msgstr "排序方向无效:%s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "排序键 %(sort_key)s 无效。如果未设置类型版本,那么它必须是下列其中一项:" "%(available)s。" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "以下排序键无效:%(sort_key)s。它必须是下列其中一项:%(available)s。" #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "排序键 %(sort_key)s 无效。您无法按此属性进行排序" #, python-format msgid "Invalid status value: %s" msgstr "状态值 %s 无效" #, python-format msgid "Invalid status: %s" msgstr "状态无效:%s" #, python-format msgid "Invalid time format for %s." msgstr "对于 %s,此时间格式无效。" msgid "Invalid type definition" msgstr "类型定义无效" #, python-format msgid "Invalid type value: %s" msgstr "类型值 %s 无效" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "更新无效。它将导致出现重复的元数据定义名称空间,该名称空间具有同一名称 %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." 
msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," "该对象具有同一名称 %(name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义对象," "该对象具有同一名称 %(name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新无效。它将导致在名称空间 %(namespace_name)s 中出现重复的元数据定义属性," "该属性具有同一名称 %(name)s。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "参数“%(param)s”的值“%(value)s”无效:%(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "选项 %(option)s 的以下值无效:%(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "可视性值无效:%s" msgid "Is not allowed value" msgstr "不是允许值" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "在设置 %s=“yes”之前,eventlet 模块似乎已导入。如果使用 ipv6,那么当前必须禁" "用 eventlet.greendns,因为 eventlet.greendns 当前与 ipv6 地址不兼容。在设置此" "项之前,请确保没有导入 eventlet。" msgid "It's invalid to provide multiple image sources." msgstr "提供多个镜像源无效" msgid "It's not allowed to add locations if locations are invisible." msgstr "不允许添加不可视的位置。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "不允许移除不可视的位置。" msgid "It's not allowed to update locations if locations are invisible." msgstr "不允许更新不可视的位置。" msgid "Items have to be unique" msgstr "项必须唯一" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "JSON 路径应当以“/”开头,不以“/”结尾,不允许存在两个后续的“/”。" msgid "Legacy image was not found" msgstr "找不到旧映像" msgid "Length is greater than maximum" msgstr "长度大于最大值" msgid "Length is less than minimum" msgstr "长度小于最小值" msgid "Limit param must be an integer" msgstr "Limit 参数必须为整数" msgid "Limit param must be positive" msgstr "Limit 参数必须为正数" #, python-format msgid "Limit param must not be higher than %d" msgstr "Limit 参数不得大于 %d" msgid "Limits request ID length." msgstr "限制请求标识的长度。" msgid "List definitions may hot have defaults" msgstr "列表定义可能没有缺省值" msgid "List of strings related to the image" msgstr "与映像相关的字符串的列表" msgid "List size is greater than maximum" msgstr "列表大小大于最大值" msgid "List size is less than minimum" msgstr "列表大小小于最小值" msgid "Loop time between checking for new items to schedule for delete." msgstr "在检查新项(以安排进行删除)之间的循环时间。" #, python-format msgid "Malformed Content-Range header: %s" msgstr "以下 Content-Range 头的格式不正确:%s" msgid "Malformed JSON in request body." msgstr "请求主体中 JSON 的格式不正确。" msgid "Max string length may not exceed 255 characters" msgstr "最大字符串长度不能超过 255 个字符" msgid "Maximal age is count of days since epoch." msgstr "最大年龄是自新纪元开始计算的天数。" msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "要接受的消息头的最大行大小。将大型令牌(通常是由 Keystone V3 API 生成的那些令" "牌)与大型服务目录配合使用时,可能需要增大 max_header_line" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "每个映像的最大映像成员数。负值视为无限。" msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "映像上允许的最大位置数。负值视为无限。" msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "映像上允许的最大属性数。负值视为无限。" msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." 
msgstr "映像上允许的最大标记数。负值视为无限。" msgid "Maximum permissible number of items that could be returned by a request" msgstr "可由请求返回的最大允许项数" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "已超过最大重定向次数 (%(redirects)s)。" msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "用户可上载的最大映像大小(以字节计)。缺省为 1099511627776 字节 (1 TB)。警" "告:此值仅应该在仔细考虑之后增大并且必须设置为小于 8 EB " "(9223372036854775808) 的值。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "对于映像 %(image_id)s,已复制成员 %(member_id)s" msgid "Member can't be empty" msgstr "成员不能为空" msgid "Member to be added not specified" msgstr "未指定要添加的成员" msgid "Membership could not be found." msgstr "找不到成员资格。" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "元数据定义名称空间 %(namespace)s 受保护,无法删除。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "对于标识 %s,找不到元数据定义名称空间" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "找不到元数据定义名称空间 %(namespace_name)s。" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "元数据定义对象 %(object_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "对于标识 %s,找不到元数据定义对象" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "元数据定义属性 %(property_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "对于标识 %s,找不到元数据定义属性" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." 
msgstr "元数据定义资源类型 %(resource_type_name)s 是种子型系统类型,无法删除。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "元数据定义资源类型关联 %(resource_type)s 受保护,无法删除。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "元数据定义标记 %(tag_name)s 受保护,无法删除。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "对于标识 %s,找不到元数据定义标记" msgid "Min string length may not be negative" msgstr "最小字符串长度不能为负数" msgid "Minimal rows limit is 1." msgstr "最小行数限制为 1。" #, python-format msgid "Missing required credential: %(required)s" msgstr "缺少必需凭证:%(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "对于区域 %(region)s,存在多个“映像”服务匹配项。这通常意味着需要区域并且尚未提" "供一个区域。" msgid "Must supply a positive, non-zero value for age." msgstr "必须提供非零正值来表示年龄。" msgid "Name of the paste configuration file." 
msgstr "粘贴配置文件的名称。" #, python-format msgid "No artifact found with ID %s" msgstr "找不到标识为 %s 的工件" msgid "No authenticated user" msgstr "不存在任何已认证的用户" #, python-format msgid "No image found with ID %s" msgstr "找不到任何具有标识 %s 的映像" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "在映像 %(img)s 中找不到标识为 %(loc)s 的位置" msgid "No permission to share that image" msgstr "不存在任何用于共享该映像的许可权" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "尚未装入“%(name)s”的插件" msgid "No property to access" msgstr "没有要访问的属性" #, python-format msgid "No such key '%s' in a dict" msgstr "字典中没有这样的键“%s”" #, python-format msgid "Not a blob property '%s'" msgstr "不是 BLOB 属性“%s”" msgid "Not a downloadable entity" msgstr "不是可下载的实体" msgid "Not a list property" msgstr "不是列表属性" #, python-format msgid "Not a list property '%s'" msgstr "不是列表属性“%s”" msgid "Not a valid value type" msgstr "不是有效的值类型" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "并非所有依赖项都处于“%s”状态" #, python-format msgid "Not allowed to create members for image %s." msgstr "不允许为映像 %s 创建成员。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "不允许取消激活状态为“%s”的映像" #, python-format msgid "Not allowed to delete members for image %s." msgstr "不允许为映像 %s 删除成员。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "不允许为映像 %s 删除标记。" #, python-format msgid "Not allowed to list members for image %s." msgstr "不允许为映像 %s 列示成员。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "不允许重新激活状态为“%s”的映像" #, python-format msgid "Not allowed to update members for image %s." msgstr "不允许为映像 %s 更新成员。" #, python-format msgid "Not allowed to update tags for image %s." 
msgstr "不允许为映像 %s 更新标记。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "不允许为镜像%(image_id)s上传数据:%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "不是阵列索引“%s”" msgid "Number of sort dirs does not match the number of sort keys" msgstr "排序方向数与排序键数不匹配" msgid "OVA extract is limited to admin" msgstr "OVA 抽取操作仅限管理员执行" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "未在 ovf-metadata.json 配置文件中指定干系 OVF 元数据。请将“cim_pasd”设置为干" "系 CIM_ProcessorAllocationSettingData 属性的列表。" msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "找不到 OVF 属性配置文件“ovf-metadata.json”。" msgid "Old and new sorting syntax cannot be combined" msgstr "无法组合新旧排序语法" msgid "Only list indexes are allowed for blob lists" msgstr "BLOB 列表中仅允许存在列表索引" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "操作“%s”需要名为“value”的成员。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "操作对象必须刚好包含一个名为“add”、“remove”或“replace”的成员。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "操作对象必须仅包含一个名为“add”、“remove”或“replace”的成员。" msgid "Operations must be JSON objects." msgstr "操作必须是 JSON 对象。" #, python-format msgid "Operator %(op)s is not supported" msgstr "运算符 %(op)s 不受支持" #, python-format msgid "Original locations is not empty: %s" msgstr "原位置不为空: %s" msgid "Owner can't be updated by non admin." msgstr "非管理员无法更新所有者。" msgid "Owner must be specified to create a tag." msgstr "必须指定所有者,才能创建标记。" msgid "Owner of the image" msgstr "映像的所有者" msgid "Owner of the namespace." msgstr "名称空间的所有者。" msgid "Param values can't contain 4 byte unicode." 
msgstr "参数值不能包含 4 字节 Unicode。" msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "在移除了服务名称的情况下,粘贴配置文件中管道的部分名称。例如,如果粘贴节名称" "为 [pipeline:glance-api-keystone],请使用值“keystone”" msgid "Path to the directory where json metadata files are stored" msgstr "用于存储 json 元数据文件的目录的路径" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "插件名称“%(plugin)s”应当与工件类型名称“%(name)s”相匹配" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "指针“%s”包含并非可识别转义序列的一部分的“~”。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "指针`%s` 包含连接符\"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "指针`%s` 没有包含有效的口令" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "指针“%s”没有以“/”开头。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "指针`%s` 以\"/\"结束." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "指针包含“~”,它不是可识别的转义序列[~0, ~1] 的一部分。" #, python-format msgid "Port \"%s\" is not valid." msgstr "端口“%s”无效。" msgid "Port the registry server is listening on." msgstr "注册表服务器正在侦听的端口。" #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "预发行数字组件太大(最大值 %d 个字符)" msgid "Private key file to use when starting API server securely." msgstr "安全启动 API 服务器时要使用的专用密钥文件。" #, python-format msgid "Process %d not running" msgstr "进程 %d 未在运行" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "必须在保存数据之前设置属性 %s。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "属性 %(property_name)s 未以需要的资源类型关联前缀“%(prefix)s”开头。" #, python-format msgid "Property %s already present." 
msgstr "属性 %s 已存在。" #, python-format msgid "Property %s does not exist." msgstr "属性 %s 不存在。" #, python-format msgid "Property %s may not be removed." msgstr "无法除去属性 %s。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "必须在保存数据之前设置属性 %s。" #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "属性“%(name)s”可能没有值“%(val)s”:%(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "属性“%s”受保护" msgid "Property names can't contain 4 byte unicode." msgstr "属性名称不能包含 4 字节 Unicode。" #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "找不到对操作 %(operation)s(对于规则 %(rule)s)的属性保护。将不允许任何角色执" "行此操作。" #, python-format msgid "Property's %(prop)s value has not been found" msgstr "尚未找到属性 %(prop)s 的值" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "提供的映像大小必须与存储的映像大小匹配。(提供的大小为 %(ps)d,存储的大小为 " "%(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "提供的对象与模式“%(schema)s”不匹配:%(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "不支持任务的所提供状态:%(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "不支持任务的所提供类型:%(type)s" msgid "Provides a user friendly description of the namespace." msgstr "提供名称空间的用户友好描述。" msgid "Public images do not have members." msgstr "公共映像没有成员。" msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." 
msgstr "" "要用于版本端点的公共 URL。缺省值为“无”,这将使用请求的 host_url 属性来填充基" "本 URL。如果 Glance 正在代理后面运行,那么您会希望将此项更改为表示该代理的 " "URL。" msgid "Python module path of data access API" msgstr "数据访问 API 的 Python 模块路径" msgid "Received invalid HTTP redirect." msgstr "接收到无效 HTTP 重定向。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "对于授权,正在重定向至 %(uri)s。" #, python-format msgid "Registry service can't use %s" msgstr "注册服务无法使用 %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "API 服务器上未正确配置注册表。原因:%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "关系 %(name)s 不能具有多个值。" #, python-format msgid "Reload of %(serv)s not supported" msgstr "不支持重新装入 %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在重新装入 %(serv)s(pid 为 %(pid)s),信号为 (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" msgid "Request body must be a JSON array of operation objects." msgstr "请求主体必须是由操作对象组成的 JSON 数组。" msgid "Request must be a list of commands" msgstr "请求必须为命令列表" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "签名认证所需的映像属性不存在。无法验证签名。" #, python-format msgid "Required store %s is invalid" msgstr "必需的存储器 %s 无效" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "资源类型名称应该尽可能与 Heat 资源类型对齐:http://docs.openstack.org/" "developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "来自 Keystone 的响应没有包含 Glance 端点。" msgid "Role used to identify an authenticated user as administrator." msgstr "一种角色,用来确定该已认证用户是管理员。" msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. 
When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "请作为长时间运行的过程运行。当未指定(缺省值)时,运行擦除操作一次,然后退" "出。当已指定时,不退出,并按 wakeup_time 时间间隔(如配置中所指定)运行擦除。" msgid "Scope of image accessibility" msgstr "映像辅助功能选项的作用域" msgid "Scope of namespace accessibility." msgstr "名称空间辅助功能选项的作用域。" msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "用于签署 Glance API 和 Glance 注册表服务跟踪消息的私钥。" #, python-format msgid "Server %(serv)s is stopped" msgstr "服务器 %(serv)s 已停止" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "服务器工作程序创建失败:%(reason)s。" msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "请为每个用户设置系统范围配额。此值是用户可在所有存储系统上使用的总字节数。值" "为 0 表示无限制。可对此值指定可选单位。接受的单位为B、KB、MB、GB 及 TB,分别" "表示字节、千字节、兆字节、吉字节及太字节。如果未指定单位,那么采用字节。请注" "意,值与单位之间不应有任何空格,并且单位区分大小写。" #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "显示级别 %(shl)s 在此操作中不受支持" msgid "Signature verification failed" msgstr "签名认证失败" msgid "Signature verification failed." msgstr "签名验证失败。" msgid "Size of image file in bytes" msgstr "映像文件的大小,以字节计" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." 
msgstr "" "一些资源类型允许每个实例具有多个“键/值”对。例如,Cinder 允许卷上的用户元数据" "和映像元数据。仅映像属性元数据是通过 Nova(调度或驱动程序)求值。此属性允许名" "称空间目标除去不确定性。" msgid "Sort direction supplied was not valid." msgstr "提供的排序方向无效。" msgid "Sort key supplied was not valid." msgstr "提供的排序键无效。" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "指定要用于给定的资源类型的前缀。当应用于指定的资源类型时,名称空间中的任何属" "性都应该使用此前缀作为前缀。必须包括前缀分隔符(例如冒号 :)。" msgid "Specifies which task executor to be used to run the task scripts." msgstr "指定要使用哪个任务执行器运行任务脚本。" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "状态必须为“暂挂”、“已接受”或“已拒绝”。" msgid "Status not specified" msgstr "未指定状态" msgid "Status of the image" msgstr "映像的状态" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不允许状态从 %(cur_status)s 转变为 %(new_status)s" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在通过信号 (%(sig)s) 停止 %(serv)s (pid %(pid)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "找不到用于 image_id 的存储器:%s" #, python-format msgid "Store for scheme %s not found" msgstr "找不到用于方案 %s 的存储器" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "提供的 %(attr)s (%(supplied)s) 与所上载映像 (%(actual)s) 生成的 %(attr)s 不匹" "配。正在将映像状态设置为“已终止”。" msgid "Supported values for the 'container_format' image attribute" msgstr "“container_format”映像属性支持的值" msgid "Supported values for the 'disk_format' image attribute" msgstr "“disk_format”映像属性支持的值" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "已阻止重新衍生,因为 %(serv)s 为 %(rsn)s。" msgid "System SIGHUP signal received." 
msgstr "接收到系统 SIGHUP 信号。" #, python-format msgid "Task '%s' is required" msgstr "需要任务“%s”" msgid "Task does not exist" msgstr "任务不存在" msgid "Task failed due to Internal Error" msgstr "由于发生内部错误而导致任务失败" msgid "Task was not configured properly" msgstr "任务未正确配置" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "找不到具有给定标识 %(task_id)s 的任务" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "“changes-since”过滤器在 v2 上不再可用。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "已指定的 CA 文件 %s 不存在" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "此任务 %(task_id)s 正在创建的映像 %(image_id)s 对象不再处于有效状态,无法进一" "步处理。" msgid "The Store URI was malformed." msgstr "存储器 URI 的格式不正确。" msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "keystone 服务的 URL。如果“use_user_token”没有生效并且正在使用 keystone 认证," "那么可指定 keystone 的 URL。" msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Swift 认证服务正在侦听的地址。(不推荐)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "管理员密码。如果“use_user_token”没有生效,那么可指定管理凭证。" msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "管理员用户名。如果“use_user_token”没有生效,那么可指定管理凭证。" msgid "The amount of time in seconds to delay before performing a delete." msgstr "用于在执行删除之前延迟的时间量(以秒计)。" msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "允许非完整映像保留在高速缓存中的时长,经过此时长后,高速缓存清除程序(如果正" "在运行)将移除非完整映像。" msgid "" "The backlog value that will be used when creating the TCP listener socket." 
msgstr "创建 TCP 侦听器套接字时将使用的储备值。" #, python-format msgid "The cert file you specified %s does not exist" msgstr "已指定的证书文件 %s 不存在" msgid "The config file that has the swift account(s)configs." msgstr "具有 swift 帐户配置的配置文件。" msgid "The current status of this task" msgstr "此任务的当前状态" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "存放映像高速缓存目录 %(image_cache_dir)s 的设备不支持 xattr。您可能需要编辑 " "fstab 并将 user_xattr 选项添加至存放该高速缓存目录的设备的相应行。" msgid "The driver to use for image cache management." msgstr "要用于映像高速缓存管理的驱动程序。" #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "版本 %s 的格式无效。使用 semver 注释" msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "映像将自动转换为的格式。如果使用 RBD 后端,那么此项应设置为“原始”" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "给定的 URI 无效。请从受支持的 URI %(supported)s 的以下列表中指定有效 URI" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "pydev 进程正在侦听调试连接的主机名/IP" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "映像 %s 已存在于从属服务器上,但是我们针对它进行的检查找不到该映像。这指示我" "们没有许可权来查看从属服务器上的所有映像。" #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "传入的工件 BLOB 太大:%s" #, python-format msgid "The incoming image is too large: %s" msgstr "引入的映像太大:%s" #, python-format msgid "The key file you specified %s does not exist" msgstr "已指定的密钥文件 %s 不存在" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像位置数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像成员数(对于此映像)的限制。已尝试:%(attempted)s,最大" "值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像属性数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "已超过关于允许的映像属性数的限制。已尝试:%(num)s,最大值:%(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "已超过关于允许的映像标记数的限制。已尝试:%(attempted)s,最大值:%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "位置 %(location)s 已存在" #, python-format msgid "The location data has an invalid ID: %d" msgstr "位置数据具有无效标识:%d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "属性保护文件的位置。此文件包含属性保护的规则及与该文件关联的角色/策略。如果未" "指定此配置值,那么在缺省情况下,将不强制执行属性保护。如果已指定值并且找不到" "该文件,那么 glance-api 服务将不启动。" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "未删除名称为 %(record_name)s 的元数据定义 %(record_type)s。其他记录仍然对其进" "行引用。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." 
msgstr "元数据定义名称空间 %(namespace_name)s 已存在。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(object_name)s 的元数据定义对" "象。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(property_name)s 的元数据定义" "属性。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "已存在以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" "称空间 %(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "找不到以下两者的元数据定义资源类型关联:资源类型 %(resource_type_name)s 与名" "称空间 %(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "找不到名称为 %(resource_type_name)s 的元数据定义资源类型。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "在名称空间 %(namespace_name)s 中,找不到名称为 %(name)s 的元数据定义标记。" msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "将用于运行引擎的方式。可以是“串行”或“并行”。" msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "为处理服务请求而创建的子进程工作程序的数目。缺省值将等于可用 CPU 数。" msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." 
msgstr "由引擎同时执行的并行活动数。当引擎方式为“并行”时,该值可大于 1。" msgid "The parameters required by task, JSON blob" msgstr "任务 JSON blob 所需的参数" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "在与注册表服务器的 SSL 连接中要使用的证书文件的路径(如果有)。或者,可将环境" "变量 GLANCE_CLIENT_CERT_FILE 设置为该 CA 证书文件的文件路径" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "在与注册表服务器的 SSL 连接中要使用的认证中心证书文件的路径(如果有)。或者," "可将环境变量 GLANCE_CLIENT_CA_FILE 设置为该 CA 证书文件的文件路径。" msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "在与注册表服务器的 SSL 连接中要使用的密钥文件的路径(如果有)。或者,可将环境" "变量 GLANCE_CLIENT_KEY_FILE 设置为该密钥文件的文件路径" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "将用于映像高速缓存管理的 sqlite 文件数据库的路径。" msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "时间段(以秒计),API 服务器将等待该时间段以让注册表请求完成。值为 0 意味着没" "有超时。" msgid "The port on which a pydev process is listening for connections." msgstr "pydev 进程正在侦听连接的端口。" msgid "The port on which the server will listen." msgstr "服务器将侦听的端口。" msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "要用于与注册表服务器通信的协议。HTTP 或 HTTPS。" #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "所提供的主体 %(body)s 在所给定的模式 %(schema)s 下无效" msgid "The provided image is too large." 
msgstr "提供的映像太大。" #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "所提供的路径“%(path)s”无效:%(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "对用于添加新映像的缺省 swift 帐户/备份存储器参数的引用。 " msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "用于认证服务的区域。如果“use_user_token”没有生效并且正在使用 keystone 认证," "那么可指定区域名称。" msgid "The request returned 500 Internal Server Error." msgstr "该请求返回了“500 内部服务器错误”。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "该请求返回了“503 服务不可用”。这通常在服务超负荷或其他瞬态停止运行时发生。" #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "该请求返回了“302 多选项”。这通常意味着您尚未将版本指示器包括在请求 URI 中。\n" "\n" "返回了响应的主体:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "该请求返回了“413 请求实体太大”。这通常意味着已违反比率限制或配额阈值。\n" "\n" "响应主体:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "该请求返回了意外状态:%(status)s。\n" "\n" "响应主体:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "所请求映像已取消激活。已禁止下载映像数据。" msgid "The result of current task, JSON blob" msgstr "当前任务 JSON blob 的结果" msgid "The signature data was not properly encoded using base64" msgstr "未正确地使用 base64 对签名数据进行编码" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." 
msgstr "数据大小 %(image_size)s 将超过限制。将剩余 %(remaining)s 个字节。" msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "要用于擦除映像的线程池的大小。缺省值为 1,这表示串行擦除。任何大于 1 的值表示" "可以并行擦除的最大映像数。" #, python-format msgid "The specified member %s could not be found" msgstr "找不到指定的成员 %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "找不到指定的元数据对象 %s" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "找不到指定的元数据标记 %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "找不到指定的名称空间 %s" #, python-format msgid "The specified property %s could not be found" msgstr "找不到指定的属性 %s" #, python-format msgid "The specified resource type %s could not be found " msgstr "找不到指定的资源类型 %s" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "已删除映像位置的状态只能设置为“pending_delete”或“deleted”。" msgid "The status of this image member" msgstr "此映像成员的状态" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "要用于获取存储首选顺序的存储器名称。名称必须由“stores”配置选项定义的其中一个" "存储器注册。当发生以下情况时,将应用此选项:将“store_type”选项用作" "由“location_strategy”配置选项定义的映像位置策略。" msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." 
msgstr "要用于认证的策略。如果“use_user_token”没有生效,那么可指定认证策略。" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "目标成员 %(member_id)s 已关联映像 %(image_id)s。" msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "管理用户的租户名称。如果“use_user_token”没有生效,那么可指定管理员租户名称。" msgid "The type of task represented by this content" msgstr "此内容表示的任务的类型" msgid "The unique namespace text." msgstr "唯一名称空间文本。" msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "上限(累计高速缓存的最大大小,以字节计),当超过该值时,高速缓存修剪程序(如" "果正在运行)会开始清除映像高速缓存。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "名称空间的用户友好名称。由 UI 使用(如果可用)。" msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "要针对 Swift 认证服务认证的用户。(不推荐)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "套接字选项 TCP_KEEPIDLE 的值。这是在 TCP 开始发送保持活动探测器之前连接必须处" "于空闲状态的时间(以秒计)。" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "%(error_key_name)s %(error_filename)s 存在问题。请对它进行验证。发生错误:" "%(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "%(error_key_name)s %(error_filename)s 存在问题。请对它进行验证。发生 OpenSSL " "错误:%(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "密钥对存在问题。请验证证书 %(cert_file)s 和密钥 %(key_file)s 是否应该在一起。" "发生 OpenSSL 错误 %(ce)s" msgid "There was an error configuring the client." 
msgstr "配置客户机时出错。" msgid "There was an error connecting to a server" msgstr "连接至服务器时出错" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "此配置值指示在属性保护文件中是使用了“角色”还是“策略”。" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "当前不允许对 Glance 任务执行此操作。到达基于 expires_at 属性的时间后,它们会" "自动删除。" msgid "This operation is currently not permitted on Glance images details." msgstr "当前不允许对 Glance 映像详细信息执行此操作。" msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "此值设置将使用哪个策略确定映像位置顺序。当前,已用 " "Glance“location_order”和“store_type”封装两个策略。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "任务在成功或失败之后生存的时间(以小时计)" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "客户机连接的套接字操作的超时。如果入局连接处于空闲状态的时间达到此秒数,那么" "它将关闭。如果值为“0”,那么意味着永久等待。" msgid "Too few arguments." msgstr "参数太少。" msgid "Too few locations" msgstr "位置太少" msgid "Too many locations" msgstr "位置太多" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "总大小为 %(size)d 字节(在 %(img_count)d 个映像上)" msgid "Turn on/off delayed delete." 
msgstr "请打开/关闭延迟删除。" msgid "Type version has to be a valid semver string" msgstr "类型版本必须是有效的 semver 字符串" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI 不能包含方案的多个实例。如果已指定类似于 swift://user:pass@http://" "authurl.com/v1/container/obj 的 URI,那么需要将它更改为使用 swift+http:// 方" "案,类似于以下:swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "用于访问外部存储器中保留的映像文件的 URL" msgid "Unable to PUT to non-empty collection" msgstr "无法放入非空集合" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "无法创建 pid 文件 %(pid)s。正在以非 root 用户身份运行吗?\n" "正在回退至临时文件,可使用以下命令停止 %(service)s 服务:\n" "%(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "无法按未知运算符“%s”进行过滤。" msgid "Unable to filter on a range with a non-numeric value." msgstr "无法对具有非数字值的范围进行过滤。" msgid "Unable to filter on a unknown operator." msgstr "无法针对未知运算符进行过滤。" msgid "Unable to filter using the specified operator." msgstr "无法使用指定运算符进行过滤。" msgid "Unable to filter using the specified range." msgstr "无法使用指定的范围进行过滤。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "在 JSON 模式更改中找不到“%s”" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "在 JSON 模式更改中找不到“op”。它必须是下列其中一项:%(available)s。" msgid "Unable to get legacy image" msgstr "无法获取旧映像" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "无法增大文件描述符限制。正在以非 root 用户身份运行吗?" 
#, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "无法从配置文件 %(conf_file)s 装入 %(app_name)s。\n" "发生错误:%(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "无法装入模式:%(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "对于 %s,找不到粘贴配置文件。" msgid "Unable to modify collection in immutable or readonly property" msgstr "无法修改不可改变的属性或者只读属性中的集合" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "无法检索标识为 %s 的证书" msgid "Unable to retrieve request id from context" msgstr "无法从上下文检索到请求标识" msgid "Unable to specify artifact type explicitly" msgstr "无法显式指定工件类型" msgid "Unable to specify artifact type version explicitly" msgstr "无法显式指定工件类型版本" msgid "Unable to specify version if multiple types are possible" msgstr "如果可能存在多种类型,那么无法指定版本" msgid "Unable to specify version if type is not specified" msgstr "如果未指定类型,那么无法指定版本" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "无法为映像 %(image_id)s 上载重复的映像数据:%(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "无法验证签名,因为该算法在此系统上不受支持。" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "无法验证签名:%(reason)s" msgid "Unauthorized image access" msgstr "无权访问映像" msgid "Unexpected body type. Expected list/dict." msgstr "意外主体类型。应该为 list/dict。" #, python-format msgid "Unexpected response: %s" msgstr "接收到意外响应:%s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "授权策略“%s”未知" #, python-format msgid "Unknown command: %s" msgstr "未知命令:%s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为“降序”或“升序”" msgid "Unrecognized JSON Schema draft version" msgstr "无法识别 JSON 模式草稿版本" msgid "Unrecognized changes-since value" msgstr "无法识别 changes-since 值" #, python-format msgid "Unsupported sort_dir. 
Acceptable values: %s" msgstr "sort_dir 不受支持。可接受值:%s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "sort_key 不受支持。可接受值:%s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "值 %(value)d 超出范围,不得超过 %(max)d" msgid "Value is greater than maximum" msgstr "值大于最大值" msgid "Value is less than minimum" msgstr "值小于最小值" msgid "Value is required" msgstr "值是必需的" #, python-format msgid "Version component is too large (%d max)" msgstr "版本组件太大(最大值 %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "版本无效:%(reason)s" msgid "Virtual size of image in bytes" msgstr "映像的虚拟大小,以字节计" msgid "Visibility must be either \"public\" or \"private\"" msgstr "可视性必须为“public”或“private”" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "用来等待 pid %(pid)s (%(file)s) 终止的时间已达到 15 秒;正在放弃" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "当值为 false 时,无论 available_plugins 如何,都无法装入任何工件。当值为 " "true 时,可以装入工件。" msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "以 SSL 方式运行服务器时,必须在配置文件中同时指定 cert_file 和 key_file 选项" "值" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "当这个选项为true时,会把镜像的所有权赋予租户。否则的话,镜像的所有权属于正在" "发出请求的已认证用户。" msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." 
msgstr "" "在与注册表服务器的连接中使用 SSL 时,不需要通过认证中心进行验证。这是注册表的" "行为,等价于在命令行上指定 --insecure(将 glanceclient 用于 API)。" msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "是否允许用户指定超出映像模式所提供的内容的映像属性" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "映像属性中是否包含后端映像位置。例如,如果使用文件系统存储器,那么将" "在“direct_url”字段中对用户返回以下 URL:file:///path/to/image。公开存储位置可" "能会产生安全风险,所以应谨慎使用此设置!将此项设置为 true 将覆盖 " "show_image_direct_url 选项。" msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "是否将后端映像存储位置包括在映像属性中。显示存储位置会造成安全风险,因此请谨" "慎使用此设置!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "向注册表进行请求时,是否传递包含用户和租户信息的头。这允许注册表使用上下文中" "间件而不使用 keystonemiddleware 的 auth_token 中间件,从而移除对 keystone 认" "证服务的调用。建议当使用此选项时,通过 auth_token 中间件以外的其他方式来确保 " "Glance api 与 Glance 注册表之间的通信安全。" msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." 
msgstr "" "向注册表进行请求时是否传递用户令牌。为了防止在上载大文件期间因令牌到期而产生" "故障,建议将此参数设置为 False。如果“use_user_token”未生效,那么可以指定管理" "凭证。" msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "用于异步任务操作的工作目录。此处设置的目录将用来对映像进行操作 - 通常在它们导" "入到目标存储器中之前。当提供工作目录时,请确保提供足够空间,以便并发任务高效" "运行而不出现空间不足的情况。通过将“max_workers”数 - 或正在运行的工作程序数 - " "乘以平均映像大小(例如 500MB)来进行粗略估算。可根据部署中的平均大小进行映像" "大小估算。请注意,根据正在运行的任务,您可能需要将此数字乘以某个因子(它取决" "于任务执行的操作)。例如,如果已启用映像转换,那么可能希望使可用大小加倍。综" "上所述,请记住这些仅是估算,并且您应该根据最差情况完成这些估算,并且准备好应" "对估算错误的情况。" #, python-format msgid "Wrong command structure: %s" msgstr "命令结构 %s 不正确" msgid "You are not authenticated." msgstr "您未经认证。" msgid "You are not authorized to complete this action." msgstr "您无权完成此操作。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "未授权您查询映像 %s。" #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "未授权您查询映像 %s 的成员。" #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "不允许在由“%s”拥有的名称空间中创建标记" msgid "You are not permitted to create image members for the image." 
msgstr "不允许为映像创建映像成员。" #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "不允许创建由“%s”拥有的映像。" #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "不允许创建由“%s”拥有的名称空间" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "不允许创建由“%s”拥有的对象" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "不允许创建由“%s”拥有的属性" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "不允许创建由“%s”拥有的 resource_type" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "不允许采用以下身份作为所有者来创建此任务:%s" msgid "You are not permitted to deactivate this image." msgstr "不允许取消激活此映像。" msgid "You are not permitted to delete this image." msgstr "不允许删除此映像。" msgid "You are not permitted to delete this meta_resource_type." msgstr "你不被允许删除meta_resource_type。" msgid "You are not permitted to delete this namespace." msgstr "不允许删除此名称空间。" msgid "You are not permitted to delete this object." msgstr "你不被允许删除这个对象。" msgid "You are not permitted to delete this property." msgstr "不允许删除此属性。" msgid "You are not permitted to delete this tag." msgstr "不允许删除此标记。" #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "不允许对此 %(resource)s 修改“%(attr)s”。" #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "不允许对此映像修改“%s”。" msgid "You are not permitted to modify locations for this image." msgstr "不允许为此映像修改位置。" msgid "You are not permitted to modify tags on this image." msgstr "不允许对此映像修改标记。" msgid "You are not permitted to modify this image." msgstr "不允许修改此映像。" msgid "You are not permitted to reactivate this image." msgstr "不允许重新激活此映像。" msgid "You are not permitted to set status on this task." msgstr "你不被允许设置这个任务的状态。" msgid "You are not permitted to update this namespace." msgstr "不允许更新此名称空间。" msgid "You are not permitted to update this object." 
msgstr "你不被允许更新这个对象。" msgid "You are not permitted to update this property." msgstr "不允许更新此属性。" msgid "You are not permitted to update this tag." msgstr "不允许更新此标记。" msgid "You are not permitted to upload data for this image." msgstr "不允许为此映像上载数据。" #, python-format msgid "You cannot add image member for %s" msgstr "无法为 %s 添加映像成员" #, python-format msgid "You cannot delete image member for %s" msgstr "无法为 %s 删除映像成员" #, python-format msgid "You cannot get image member for %s" msgstr "无法为 %s 获取映像成员" #, python-format msgid "You cannot update image member %s" msgstr "无法更新映像成员 %s" msgid "You do not own this image" msgstr "您未拥有此映像" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "已选择在连接中使用 SSL,并且已提供证书,但是未能提供 key_file 参数或设置 " "GLANCE_CLIENT_KEY_FILE 环境变量" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "已选择在连接中使用 SSL,并且已提供密钥,但是未能提供 cert_file 参数或设置 " "GLANCE_CLIENT_CERT_FILE 环境变量" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() 已获取意外的关键字自变量“%s”" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "在更新中,无法从 %(current)s 转变为 %(next)s(需要 from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "定制属性 (%(props)s) 与基本属性冲突" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "在此平台上,eventlet 的“poll”和“selects”hub 都不可用" msgid "is_public must be None, True, or 
False" msgstr "is_public 必须为“无”、True 或 False" msgid "limit param must be an integer" msgstr "limit 参数必须为整数" msgid "limit param must be positive" msgstr "limit 参数必须为正数" #, python-format msgid "location: %s data lost" msgstr "位置:%s 数据已丢失" msgid "md5 hash of image contents." msgstr "映像内容的 md5 散列。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() 已获取意外的关键字 %s" msgid "protected must be True, or False" msgstr "protected 必须为 True 或 False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "无法启动 %(serv)s。发生错误:%(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id 太长,最大大小为 %s" glance-12.0.0/glance/locale/es/0000775000567000056710000000000012701407204017260 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/es/LC_MESSAGES/0000775000567000056710000000000012701407204021045 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/es/LC_MESSAGES/glance-log-warning.po0000664000567000056710000002060312701407047025066 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Adriana Chisco Landazábal , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-07-15 12:39+0000\n" "Last-Translator: openstackjenkins \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "" "%(image_id)s: field %(key)s differs (source is %(master_value)s, destination " "is %(slave_value)s)" msgstr "" "%(image_id)s: campo %(key)s difiere (fuente es %(master_value)s, destino es " "%(slave_value)s)" #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "Acceso denegado a la imagen %(id)s aunque se devuelve 'not found'" #, python-format msgid "An optional task has failed, the failure was: %s" msgstr "Ha fallado una tarea opcional, el error fue: %s" #, python-format msgid "Artifact with id=%s is not accessible" msgstr "No se puede acceder al artefacto con id=%s" #, python-format msgid "Artifact with id=%s not found" msgstr "No se encontró artefacto con id=%s" msgid "Artifact with the specified type, name and version already exists" msgstr "Ya existe artefacto con el tipo, nombre versión especificada" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%d" msgstr "" "El artefacto con el tipo, nombre versión especificada ya tiene la " "dependencia directa=%d" #, python-format msgid "" "Artifact with the specified type, name and versions already has the direct " "dependency=%s" msgstr "" "El artefacto con el tipo, nombre versión especificada ya tiene la " "dependencia directa=%s " msgid "Attempted to modify image user did not own." msgstr "" "Se ha intentado modificar una imagen que no era propiedad del usuario. 
" #, python-format msgid "Cached image file '%s' doesn't exist, unable to delete" msgstr "" "El archivo de imagen almacenado en memoria caché '%s' no existe, no se puede " "suprimir" #, python-format msgid "Can't load artifact %s: load disabled in config" msgstr "" "No se puede cargar artefacto %s: la carga no está habilitada en la " "configuración" #, python-format msgid "Can't load artifact %s: not in available_plugins list" msgstr "No se puede cargar artefacto %s: no en lista available_plugins" #, python-format msgid "Could not find image %s" msgstr "No se ha podido encontrar la imagen %s" #, python-format msgid "" "Could not find schema properties file %s. Continuing without custom " "properties" msgstr "" "No se ha podido encontrar el archivo de propiedades del esquema %s. Se va a " "continuar sin las propiedades personalizadas" #, python-format msgid "Could not find task %s" msgstr "No se encontró tarea %s" #, python-format msgid "Could not find task info %s" msgstr "No se encontró información de tarea %s" msgid "Deadlock detected. Retrying..." msgstr "Punto muerto detectado. Intentando de nuevo..." #, python-format msgid "Duplicate entry for values: %s" msgstr "Conexión para valores duplicada: %s" #, python-format msgid "" "Failed to activate image %s in registry. About to delete image bits from " "store and update status to 'killed'." msgstr "" "No se pudo activar imagen %s en registro. A punto de eliminar bits de imagen " "del almacen y actualizar estado a 'killed'." #, python-format msgid "Failed to decrypt location value for image %(image_id)s" msgstr "Error al decodificar valor de ubicación para imagen %(image_id)s" #, python-format msgid "Failed to delete blob %s in store from URI" msgstr "No se eliminó objeto %s en almacén de URI" #, python-format msgid "Failed to delete file %(path)s. Got error: %(e)s" msgstr "No se ha eliminado fichero %(path)s. 
Tuvo error: %(e)s" #, python-format msgid "Failed to delete image %s in store from URI" msgstr "No se eliminó imagen %s en almacén de URI" #, python-format msgid "Failed to find task %(task_id)s. Reason: %(reason)s" msgstr "No se encontró tarea %(task_id)s. Razón: %(reason)s" msgid "Failed to successfully cache all images in queue." msgstr "" "No se ha podido almacenar en memoria caché satisfactoriamente todas las " "imágenes en cola." #, python-format msgid "" "Fetch of cache file failed (%(e)s), rolling back by moving " "'%(incomplete_path)s' to '%(invalid_path)s'" msgstr "" "Se ha encontrado un error en la captación del archivo de memoria caché " "(%(e)s), se va a retrotraer moviendo '%(incomplete_path)s' a " "'%(invalid_path)s'" #, python-format msgid "Forbidden to create task. Reason: %(reason)s" msgstr "Se olvidó crear tarea. Razón: %(reason)s" #, python-format msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" msgstr "Se olvidó obtener tarea %(task_id)s. Razón: %(reason)s" msgid "Id not in sort_keys; is sort_keys unique?" msgstr "Id no está en sort_keys; ¿es sort_keys exclusivo?" #, python-format msgid "Image %s entirely missing from the destination" msgstr "Falta la totalidad de la imagen %s del destino" #, python-format msgid "Image '%s' is not active. Not caching." msgstr "La imagen '%s' no está activa. No almacenando en memoria caché. " #, python-format msgid "" "Image cache driver '%(driver_module)s' failed to configure. Got error: " "'%(config_err)s" msgstr "" "El controlador de caché de imágenes '%(driver_module)s' no se ha podido " "configurar. Se ha obtenido el error: '%(config_err)s" #, python-format msgid "" "Image cache driver '%(driver_name)s' failed to load. Got error: " "'%(import_err)s." msgstr "" "El controlador de caché de imágenes '%(driver_name)s' no se ha podido " "cargar. Se ha obtenido el error: '%(import_err)s." #, python-format msgid "Invalid marker. Image %(id)s could not be found." msgstr "Marcador no válido. 
No se puede encontrar imagen %(id)s." #, python-format msgid "Invalid membership association specified for image %(id)s" msgstr "" "Se ha especificado una asociación de pertenencia no válida para la imagen " "%(id)s" #, python-format msgid "Member %(id)s not found" msgstr "No se ha encontrado el miembro %(id)s" #, python-format msgid "Metadata tag not found for id %s" msgstr "No se ha encontrado etiqueta de metadatos para id%s" #, python-format msgid "No metadata found for image '%s'" msgstr "No se han encontrado metadatos para la imagen '%s' " #, python-format msgid "Show level %s is not supported in this operation" msgstr "EL nivel de vista %s no es soportado por esta operación" #, python-format msgid "Task %(task_id)s failed with exception %(error)s" msgstr "Tarea %(task_id)s falló con exceepción %(error)s" msgid "The `eventlet` executor has been deprecated. Use `taskflow` instead." msgstr "" "Se ha desaprobado el uso del ejecutor `eventlet`. En su lugar use `taskflow`." #, python-format msgid "" "The conversion format is None, please add a value for it in the config file " "for this task to work: %s" msgstr "" "No hay formato de conversión, por favor agregue un valor para éste en el " "fichero de configuración para esta tarea en el trabajo: %s" msgid "Unable to get deleted image" msgstr "No se ha podido obtener una imagen suprimida" #, python-format msgid "Unable to get deleted task %s" msgstr "No se puede obtener tarea eliminada %s" msgid "Unable to get unowned image" msgstr "No se ha podido obtener una imagen que no es de su propiedad" #, python-format msgid "Unrecognised child %s" msgstr "Hijo %s no reconocido" #, python-format msgid "" "User %(user)s attempted to upload an image of size %(size)d that will exceed " "the quota. %(remaining)d bytes remaining." msgstr "" "Usuario %(user)s intentó cargar una imagen de tamaño %(size)d que excede la " "capacidad. %(remaining)d bytes restantes." 
#, python-format msgid "" "User %(user)s attempted to upload an image of unknown size that will exceed " "the quota. %(remaining)d bytes remaining." msgstr "" "Usuario %(user)s intentó cargar una imagen de tamaño desconocido que excede " "la capacidad. %(remaining)d bytes restantes." #, python-format msgid "User lacks permission to share image %(id)s" msgstr "El usuario no tiene permiso para compartir la imagen %(id)s" glance-12.0.0/glance/locale/es/LC_MESSAGES/glance-log-info.po0000664000567000056710000002140112701407047024351 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Adriana Chisco Landazábal , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:52+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-06-23 01:20+0000\n" "Last-Translator: openstackjenkins \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "%(task_id)s of %(task_type)s completed" msgstr "%(task_id)s de %(task_type)s completada" msgid "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." msgstr "" "'metadata_encryption_key' no se ha especificado en el archivo de " "configuración, o no se ha especificado ningún archivo de configuración. Esto " "significa que esta migración es NOOP." #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "Acceso denegado a la imagen %(id)s aunque se devuelve 'not found'" msgid "All workers have terminated. Exiting" msgstr "Todos los trabajadores han terminado. 
Saliendo" #, python-format msgid "Artifact %s has been successfully loaded" msgstr "El artefacto %s se ha cargado correctamente" #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "Llamando a %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgid "Caught keyboard interrupt. Exiting." msgstr "Se ha generado interrupción de teclado. Saliendo." #, python-format msgid "Child %d exiting normally" msgstr "El hijo %d está saliendo de forma normal" #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "Limpiando %s luego de exceder la capacidad" #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "Limpiando %s luego de exceder la capacidad." #, python-format msgid "Considering: %s" msgstr "Considerando: %s" #, python-format msgid "Could not find artifact %s" msgstr "No se pudo encontrar artefacto %s" msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "Conclusión de daemon en KeyboardInterrupt" msgid "Defaulting to SQLite driver." msgstr "Se toma de forma predeterminada el controlador SQLite." #, python-format msgid "Delete denied for public image %(id)s" msgstr "Se ha denegado la supresión para la imagen pública %(id)s" #, python-format msgid "File %s loaded to database." msgstr "Se ha cargado fichero %s a la base de datos." #, python-format msgid "Image %(id)s not found" msgstr "No se ha encontrado la imagen %(id)s" #, python-format msgid "Image %s is being synced" msgstr "Se está sincronizando imagen %s" #, python-format msgid "Image %s is deactivated" msgstr "Se ha desactivado la imagen %s" #, python-format msgid "Image %s is reactivated" msgstr "Se ha reactivado la imagen %s" #, python-format msgid "Image %s metadata has changed" msgstr "Metadatos de imagen %s ha cambiado" #, python-format msgid "Image cache loaded driver '%s'." msgstr "La memoria caché de imagen ha cargado el controlador '%s'." 
msgid "Initialized gzip middleware" msgstr "Se ha iniciado gzip middleware" msgid "Initialized image cache management middleware" msgstr "Middleware de gestión de memoria caché de imágenes inicializado" msgid "Initialized image cache middleware" msgstr "Middleware de memoria caché de imágenes inicializado" #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "Iniciando depurador con configuración: %s" #, python-format msgid "" "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" msgstr "" "Cargando scripts de tarea reconocidos para task_id %(task_id)s de tipo " "%(task_type)s" msgid "Metadata loading finished" msgstr "Ha concluido la carga de metadatos" #, python-format msgid "Namespace %(namespace)s saved in %(file)s" msgstr "Se ha guardado espacio de nombre %(namespace)s en %(file)s" #, python-format msgid "Not queueing image '%s'. Already being written to cache" msgstr "" "No se está poniendo en cola la imagen '%s'. Ya se está grabando en la " "memoria caché " #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "" "No se está poniendo en cola la imagen '%s'. Ya está almacenada en memoria " "caché. " #, python-format msgid "Not queueing image '%s'. Already queued." msgstr "No se está poniendo en cola la imagen '%s'. Ya está en cola. 
" #, python-format msgid "Overwriting namespace %s" msgstr "Reemplazando espacio de nombre %s" #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "Se han obtenido %(reaped)s entradas de memoria caché %(entry_type)s " #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "" "Rechazando la solicitud de creación de imagen para el ID de imagen no válido " "'%(bad_id)s'" #, python-format msgid "Removed dead child %s" msgstr "Se ha eliminado hijo muerto %s" #, python-format msgid "Removed invalid cache file %s" msgstr "Se ha eliminado el archivo de memoria caché no válido %s" #, python-format msgid "Removed stale child %s" msgstr "Se ha eliminado hijo obsoleto %s" #, python-format msgid "Removed stalled cache file %s" msgstr "Se ha eliminado el archivo de memoria caché colgado %s" #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "Devolviendo %(funcname)s: %(output)s" #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "Depurando imagen %(id)s de ubicaciones %(count)d." #, python-format msgid "Skipping namespace %s. It already exists in the database." msgstr "Omitiendo nombre de espacio %s. Ya existe en la base de datos." 
#, python-format msgid "Started child %s" msgstr "Se ha iniciado el hijo %s" #, python-format msgid "Starting %d workers" msgstr "Iniciando %d trabajadores" #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "Iniciando daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgid "Starting single process server" msgstr "Iniciando servidor de proceso individual" #, python-format msgid "Storing: %s" msgstr "Almacenamiento: %s" #, python-format msgid "Successfully cached all %d images" msgstr "" "Se han almacenado satisfactoriamente en memoria caché todas las %d imágenes" #, python-format msgid "Successfully created image %(id)s" msgstr "La imagen %(id)s se ha creado correctamente" #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "Se ha suprimido correctamente una pertenencia desde la imagen %(id)s" #, python-format msgid "Successfully deleted image %(id)s" msgstr "La imagen %(id)s Se ha suprimido correctamente" #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "Se ha actualizado correctamente una pertenencia para la imagen %(id)s" #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "" "Se han actualizado correctamente las pertenencias para la imagen %(id)s" #, python-format msgid "Table %s has been cleared" msgstr "Se ha limpiado tabla %s" #, python-format msgid "Task %(task_id)s beginning import execution." 
msgstr "Comenzando ejecución de importación para tarea %(task_id)s" #, python-format msgid "Task %(task_id)s: Could not import image file %(image_data)s" msgstr "Tarea %(task_id)s: No pudo importar fichero de imagn %(image_data)s" #, python-format msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" msgstr "" "Tarea %(task_id)s: Obtuvo uri de datos de imagen %(data_uri)s para importar" #, python-format msgid "" "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" msgstr "" "Cambiando estado de tarea [%(task_id)s] de %(cur_status)s a %(new_status)s" msgid "Triggering asynchronous copy from external source" msgstr "Desencadenando copia asíncrona desde origen externo" msgid "Unable to get deleted image" msgstr "No se ha podido obtener una imagen suprimida" #, python-format msgid "Update denied for public image %(id)s" msgstr "Actualización denegada para la imagen pública %(id)s" #, python-format msgid "Updating metadata for image %(id)s" msgstr "Actualizando metadatos para la imagen %(id)s" #, python-format msgid "Uploaded data of image %s from request payload successfully." msgstr "" "Se ha subido de manera exitosa la información de imagen %s de la carga útil " "de solicitud." #, python-format msgid "creating table %(table)s" msgstr "creando la tabla %(table)s" #, python-format msgid "dropping table %(table)s" msgstr "descartando la tabla %(table)s" glance-12.0.0/glance/locale/es/LC_MESSAGES/glance-log-error.po0000664000567000056710000002643712701407047024565 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Adriana Chisco Landazábal , 2015 # Rafael Rivero , 2014-2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev23\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-08 22:12+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-17 02:48+0000\n" "Last-Translator: openstackjenkins \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "An error occurred during image.send notification: %(err)s" msgstr "Se ha producido un error durante la notificación image.send :%(err)s " #, python-format msgid "" "An error occurred reading from backend storage for image %(image_id)s: " "%(err)s" msgstr "" "Se ha producido un error al leer en el almacenamiento del programa de fondo " "para la imagen %(image_id)s: %(err)s" #, python-format msgid "" "Backend storage for image %(image_id)s disconnected after writing only " "%(bytes_written)d bytes" msgstr "" "El almacenamiento del programa de fondo para la imagen %(image_id)s se ha " "desconectado después de grabar solo %(bytes_written)d bytes" #, python-format msgid "Can not get scrub jobs from queue: %s" msgstr "No se puede obtener trabajos de depuración desde cola: %s" msgid "Cannot use this parameter with the operator IN" msgstr "No se puede utilizar este parámetro con el operador IN" #, python-format msgid "Caught error: %s" msgstr "Capturado error: %s" msgid "Checksum header is missing." msgstr "Falta la cabecera de suma de comprobación." #, python-format msgid "Copy from external source '%(scheme)s' failed for image: %(image)s" msgstr "" "Ha fallado la copia desde fuente externa '%(scheme)s' para imagen: %(image)s" #, python-format msgid "Couldn't find property protection file %(file)s: %(error)s." msgstr "" "No se pudo encontrar fichero de protección a propiedad %(file)s: %(error)s." 
#, python-format msgid "Encountered a malformed property protection rule %(rule)s: %(error)s." msgstr "" "Se encontró una regla de protección a la propiedad %(rule)s: %(error)s " "incorrecta." #, python-format msgid "Error executing SQLite call. Got error: %s" msgstr "Error al ejecutar la llamada SQLite. Se ha obtenido error: %s" msgid "" "Error setting up the debug environment. Verify that the option " "pydev_worker_debug_host is pointing to a valid hostname or IP on which a " "pydev server is listening on the port indicated by pydev_worker_debug_port." msgstr "" "Error configurando el entorno de depuración. Verifique que la opción " "pydev_worker_debug_host señala un nombre de host o IP no válida en la cual " "un servidor pydev está escuchando respestas en el puerto indicado por " "pydev_worker_debug_port." #, python-format msgid "Error: cooperative_iter exception %s" msgstr "Error: excepción de cooperative_iter %s" #, python-format msgid "" "Exception encountered while tee'ing image '%(image_id)s' into cache: " "%(error)s. Continuing with response." msgstr "" "Se ha encontrado una excepción al colocar la imagen '%(image_id)s' en la " "memoria caché: %(error)s. Continuando con la respuesta. 
" #, python-format msgid "Failed to delete blob %(blob_id)s from store: %(exc)s" msgstr "No se borró objeto %(blob_id)s del almacén: %(exc)s" #, python-format msgid "Failed to delete image %(image_id)s from store: %(exc)s" msgstr "Error al eliminar imagen %(image_id)s de almacén: %(exc)s" #, python-format msgid "Failed to delete namespace %(namespace)s " msgstr "No se eliminó el espacio de nombre %(namespace)s" #, python-format msgid "Failed to execute introspection %(task_id)s: %(exc)s" msgstr "Error al ejecutar introspección %(task_id)s: %(exc)s" #, python-format msgid "Failed to execute task %(task_id)s: %(exc)s" msgstr "Error al ejecutar la tarea %(task_id)s: %(exc)s" #, python-format msgid "Failed to load location strategy module %(module)s: %(e)s" msgstr "Error al cargar módulo de estrategia de localización %(module)s: %(e)s" #, python-format msgid "Failed to load the %s executor provided in the config." msgstr "Error al cargar el ejecutor %s en la configuración." #, python-format msgid "Failed to save task %(task_id)s in DB as task_repo is %(task_repo)s" msgstr "" "Error al guardar tarea %(task_id)s en base de datos como task_repo es " "%(task_repo)s" msgid "Failed to upload artifact blob data due to HTTP error" msgstr "" "Se ha encontrado un error en la carga de datos de objetos artefactos debido " "a un error HTTP" msgid "Failed to upload artifact blob data due to internal error" msgstr "" "Se ha encontrado un error en la carga de datos de objetos artefactos debido " "a un error interno" msgid "Failed to upload image data due to HTTP error" msgstr "" "Se ha encontrado un error en la carga de datos de imagen debido a un error " "HTTP" msgid "Failed to upload image data due to internal error" msgstr "" "Se ha encontrado un error en la carga de datos de imagen debido a un error " "interno" #, python-format msgid "Forbidden to create resource type. Reason: %(reason)s" msgstr "Se olvidó crear tipo de recurso. 
Razón: %(reason)s" #, python-format msgid "" "Glance tried all active locations to get data for blob %s but all have " "failed." msgstr "" "Glance intentó obtener datos para el objeto %s desde todas las ubicaciones " "activas pero todas fallaron." #, python-format msgid "" "Glance tried all active locations to get data for image %s but all have " "failed." msgstr "" "Glance intentó obtener datos para la imagen %s de todas las ubicaciones " "activas pero todas fallaron." #, python-format msgid "" "Image cache contained image file for image '%s', however the registry did " "not contain metadata for that image!" msgstr "" "La memoria caché de imágenes contenía el archivo de imagen para la imagen " "'%s', sin embargo, el registro no contenía metadatos para dicha imagen." msgid "Internal error occurred while trying to process task." msgstr "Ocurrió un error interno al intentar procesar la tarea." msgid "Invalid format of swift store config cfg" msgstr "Formato de configuración de almacén de intercambio cfg no válido" #, python-format msgid "Invalid store uri for image: %(image_id)s. Details: %(reason)s" msgstr "Almacén uri para imagene: %(image_id)s no válido. Detalles: %(reason)s" #, python-format msgid "" "Invalid value '%s' for 'property_protection_rule_format'. The permitted " "values are 'roles' and 'policies'" msgstr "" "Valor no válido '%s' para 'property_protection_rule_format'. Los valores " "permitidos son 'roles' y 'policies'" #, python-format msgid "Invalid value for option user_storage_quota: %(users_quota)s" msgstr "Opción para user_storage_quota: %(users_quota)s no válida" #, python-format msgid "Json schema files not found in %s. Aborting." msgstr "No se encuentran ficheros de esquema Json en %s. Abortando." #, python-format msgid "" "Malformed property protection rule in [%(prop)s] %(op)s=%(perm)s: '@' and " "'!' are mutually exclusive" msgstr "" "Regla de protección a la propiedad incorrecta en [%(prop)s] %(op)s=%(perm)s: " "'@' y '!' 
son exclusivos mutuamente" #, python-format msgid "" "Multiple policies '%s' not allowed for a given operation. Policies can be " "combined in the policy file" msgstr "" "No se permiten políticas múltiples '%s' para una operación dada. Las " "políticas pueden combinarse en el fichero de política" #, python-format msgid "Not respawning child %d, cannot recover from termination" msgstr "" "No se va a volver a generar el hijo %d, no se puede recuperar de la " "terminación" #, python-format msgid "Operator %s is not supported" msgstr "No se soporta operador %s" msgid "" "Please provide no more than one option from this list: --prefer_new, --" "overwrite" msgstr "" "Por favor proporcione no más que una opción de esta lista: --prefer_new, --" "overwrite" #, python-format msgid "" "RPC Call Error: %(val)s\n" "%(tb)s" msgstr "" "Error de llamada RPC: %(val)s\n" "%(tb)s" #, python-format msgid "Received HTTP error while uploading image %s" msgstr "Se recibió un error HTTP mientras se cargaba imagen %s" #, python-format msgid "Registry client request %(method)s %(action)s raised %(exc_name)s" msgstr "" "La solicitud de cliente de registro %(method)s %(action)s ha producido " "%(exc_name)s" #, python-format msgid "Task ID %s" msgstr "Identificación de tarea %s" #, python-format msgid "" "Task [%(task_id)s] status failed to change from %(cur_status)s to " "%(new_status)s" msgstr "" "Error al cambiar el estado de tarea [%(task_id)s] de %(cur_status)s a " "%(new_status)s" #, python-format msgid "Task not found for task_id %s" msgstr "No se ha encontrado tarea para task_id %s" #, python-format msgid "" "Task: %(task_id)s failed to import image %(image_id)s to the filesystem." msgstr "" "Tarea: %(task_id)s falló al importar imagen %(image_id)s al sistema de " "fichero." msgid "" "This execution of Tasks is not setup. Please consult the project " "documentation for more information on the executors available." msgstr "" "No está configurada esta ejecución de tarea. 
Por favor consulte la " "documentación del proyecto en los ejecutores disponibles para más " "información." #, python-format msgid "" "This task type %(task_type)s is not supported by the current deployment of " "Glance. Please refer the documentation provided by OpenStack or your " "operator for more information." msgstr "" "La implementación actual de Glance no soporta ell tipo de tarea " "%(task_type)s. Por favor consulte la documentación proporcionadapor " "OpenStack o a su operador para más información." msgid "" "To use --prefer_new or --overwrite you need to combine of these options with " "--merge option." msgstr "" "Para usar --prefer_new o reemplazar necesita combinar estas opciones con la " "opción --merge." #, python-format msgid "Unable to create image %s" msgstr "No es posible crear imagen %s" #, python-format msgid "Unable to delete image %s" msgstr "No es posible eliminar imagen %s" msgid "Unable to get images" msgstr "No es posible obtener imágenes " #, python-format msgid "Unable to kill image %(id)s: " msgstr "No se puede matar imagen %(id)s:" #, python-format msgid "Unable to load artifacts: %s" msgstr "No se puede cargar artefactos: %s" #, python-format msgid "Unable to restore artifact %(artifact_id)s: %(e)s" msgstr "No se puede restaurar artefacto %(artifact_id)s: %(e)s" #, python-format msgid "Unable to restore image %(image_id)s: %(e)s" msgstr "No es posible restaurar imagen %(image_id)s: %(e)s" #, python-format msgid "Unable to show image %s" msgstr "No es posible mostrar imagen %s" #, python-format msgid "Unable to update image %s" msgstr "No ha sido posible actualizar la imagen %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" #, python-format msgid "could not find %s" msgstr "no se ha podido encontrar %s" #, python-format msgid "swift config file %(conf_file)s:%(exc)s not found" msgstr "" "No se encontró fichero de configuración de transferencia 
%(conf_file)s:" "%(exc)s " glance-12.0.0/glance/locale/es/LC_MESSAGES/glance.po0000664000567000056710000034501012701407051022641 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Adriana Chisco Landazábal , 2015 # Alfredo Matas , 2015 # Marian Tort , 2015 # Pablo Sanchez , 2015 # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. #zanata # Eugènia Torrella , 2016. #zanata # German Alexis Rivera De La Torre , 2016. #zanata # Marshall Margenau , 2016. #zanata # Tom Cocozzello , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 07:54+0000\n" "Last-Translator: Eugènia Torrella \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s debe ser una secuencia" #, python-format msgid "%(attribute)s is required" msgstr "se necesita %(attribute)s" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s no debe ser más larga que %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s no debe ser más corto que %(length)i " #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s debe coincidir con el patrón %(pattern)s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Ocurrió excepción %(cls)s en la última llamada a rpc: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the 
image %(i_id)s." msgstr "" "No se ha encontrado %(m_id)s en la lista de miembros de la imagen %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "Se esta ejecutando %(serv)s (pid %(pid)s) ..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "Parece que %(serv)s ya se está ejecutando: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s está registrado como módulo dos veces. %(module)s no se " "encuentra en uso." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s no se ha configurado correctamente. No se pudo " "cargar el almacén de sistema de ficheo" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s no se ha configurado adecuadamente. Hace falta " "work dir: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s ing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s ing %(serv)s con %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Por favor especifique el par host:puerto, en donde el host es una " "dirección IPv4, IPv6, nombre de host o FQDN. Si utiliza una dirección IPv6 " "enciérrela entre paréntesis separados del puerto (por ejemplo \"[fe80::a:b:" "c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s no puede contener caracteres 4 byte unicode." 
#, python-format msgid "%s is already stopped" msgstr "%s ya se detuvo" #, python-format msgid "%s is stopped" msgstr "%s se ha detenido" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "Valor de '%(param)s' fuera de rango, no debe exceder %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Se necesita la opción --os_auth_url ovariable de ambiente OS_AUTH_URL cuando " "la estrategia de autenticación keystone está habilitada\n" msgid "A body is not expected with this request." msgstr "No se espera un cuerpo en esta solicitud." msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "Una lista de artefactos que están permitidos en el nombre del formato o " "versión del nombre. Una lista vacía significa que se puede subir cualquier " "artefacto." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ya existe el objeto para definición de metadatos de nombre=%(object_name)s " "en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ya existe la propiedad para definición de metadatos de nombre=" "%(property_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ya existe el tipo de recurso para definición de metadatos=" "%(resource_type_name)s" #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Ya existe la etiqueta para metadatos de nombre=%(name)s en el espacio de " "nombre=%(namespace_name)s." 
msgid "A set of URLs to access the image file kept in external store" msgstr "" "Conjunto de URLs para acceder al archivo de imagen se mantiene en un almacén " "externo" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "Clave AES para cifrar metadatos de la 'ubicación' del almacén. Esto incluye, " "si se utiliza, credenciales Swift o S3. Se debe configurar en una serie " "aleatoria de 16, 24 o 32 bytes de longitud " msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Dirección para enlazar el servidor. Útil cuando se selecciona una interfaz " "particular de red." msgid "Address to find the registry server." msgstr "La dirección para encontrar el servidor de registro." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Permite a los usuarios no autenticados acceder a API con privilegio de solo-" "lectura. Esto solo se aplica cuando se utiliza ContextMiddleware." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "" "Los valores %s permitidos no son válidos bajo validadores proporcionados" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Cantidad de espacio de disco (en GB) necesario para la imagen de arranque." msgid "Amount of ram (in MB) required to boot image." msgstr "Cantidad de RAM (en MB) necesario para la imagen de arranque." 
msgid "An identifier for the image" msgstr "Un identificador para la imagen" msgid "An identifier for the image member (tenantId)" msgstr "Un identificador para el miembro de la imagen (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificador para el propietario de esta tarea" msgid "An identifier for the task" msgstr "Un identificador para la tarea" msgid "An image file url" msgstr "La URL de un archivo de imagen" msgid "An image schema url" msgstr "La URL de un esquema imagen" msgid "An image self url" msgstr "La URL propia de una imagen" #, python-format msgid "An image with identifier %s already exists" msgstr "Ya existe una imagen con el identificador %s" msgid "An import task exception occurred" msgstr "Se ha producido una excepción en una tarea de importación" msgid "An object with the same identifier already exists." msgstr "Ya existe un objeto con el mismo identificador." msgid "An object with the same identifier is currently being operated on." msgstr "Ya se está operando un objeto con el mismo identificador." msgid "An object with the specified identifier was not found." msgstr "No se ha encontrado un objeto con el identificador especificado." 
msgid "An unknown exception occurred" msgstr "Se ha producido una excepción desconocida " msgid "An unknown task exception occurred" msgstr "Se ha producido una excepción de tarea desconocida " #, python-format msgid "Array has no element at position %d" msgstr "La matriz no tiene un elemento en la posición %d" msgid "Array property can't have item_type=Array" msgstr "La propiedad de matriz no puede contener item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "No se pudo eliminar artefacto %s porque está en uso: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "El artefacto no puede cambiar estado de %(source)s a %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "El artefacto excede la cuota de almacenamiento: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "El artefacto no tiene propiedad %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "El artefacto no puede cambiar estado de %(curr)s a %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "El soporte de almacenamiento de artefactos está lleno: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" "El artefacto de nombre '%(name)s' y versión '%(version)s' es desconocido" msgid "Artifact with a circular dependency can not be created" msgstr "No se puede crear artefacto con dependencia circular" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "No se puede acceder al artefacto con id=%(id)s" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "No se encontró artefacto identificado como=%(id)s" msgid "Artifact with the specified type, name and version already exists" msgstr "Ya existe artefacto con el tipo, nombre y versión especificados" #, python-format msgid "" "Artifact with the specified type, 
name and version already has the direct " "dependency=%(dep)s" msgstr "" "El artefacto con el tipo, nombre versión especificada ya tiene la " "dependencia directa=%(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "El artefacto con el tipo, nombre versión especificada ya tiene la " "dependencia transitiva=%(dep)s" msgid "Attempt to set readonly property" msgstr "Intento de configurar la propiedad solo-lectura" msgid "Attempt to set value of immutable property" msgstr "Intento de configurar valor de la propiedad no permutable" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Se ha intentado subir imagen duplicada: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Se ha intentado actualizar el campo de ubicación para una imagen que no está " "en estado de cola." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "El atributo '%(property)s' es de sólo lectura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "El atributo '%(property)s' está reservado." #, python-format msgid "Attribute '%s' is read-only." msgstr "El atributo '%s' es de solo lectura." #, python-format msgid "Attribute '%s' is reserved." msgstr "El atributo '%s' está reservado." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "El atributo container_format solo se puede reemplazar por una imagen en cola." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "El atributo isk_format solo se puede remplazar con una imagen en cola." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" "La clave de autenticación para el usuario que se autentica con relación al " "servicio de autenticación Swift es escuchado (rechazado)" #, python-format msgid "Auth service at URL %(url)s not found." 
msgstr "No se ha encontrado el servicio de autorización en el URL %(url)s." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Error de autenticación: es posible que el token haya caducado durante la " "carga de archivos. Borrando los datos de imagen de %s." msgid "Authorization failed." msgstr "Ha fallado la autorización." msgid "Available categories:" msgstr "Categorías disponibles:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato de filtro de consulta \"%s\" incorrecto. Utilice la notación de " "DateTime de la ISO 8601." #, python-format msgid "Bad Command: %s" msgstr "Comando incorrecto: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Cabecera incorrecta: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Valores incorrectos pasaron al filtro %(filter)s se obtuvo %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "La URI S3 se realizó de manera incorrecta: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Credenciales formadas incorrectamente '%(creds)s' en URI de Swift" msgid "Badly formed credentials in Swift URI." msgstr "Credenciales con formato incorrecto en URI de Swift." msgid "Base directory that the image cache uses." msgstr "El directorio base que utiliza la memoria caché de imágenes." msgid "BinaryObject property cannot be declared mutable" msgstr "La propiedad BinaryObject no se puede declarar mutable" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Es probable que el objeto %(name)s no contenga valores múltiples" msgid "Blob size is not set" msgstr "El tamaño del objeto no está establecido" msgid "Body expected in request." msgstr "Se esperaba un cuerpo en la solicitud." 
msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "No se debe especificar un fichero y legacy_image_id al mismo tiempo" msgid "CA certificate file to use to verify connecting clients." msgstr "" "Fichero de certificado CA a utilizar para verificar la conexión de clientes." msgid "Cannot be a negative value" msgstr "No puede ser un valor negativo" msgid "Cannot be a negative value." msgstr "No puede ser un valor negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "No se puede convertir imagen %(key)s '%(value)s' en un entero." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "No se puede declarar propiedad de artefacto con el nombre 'metadatos' " "reservado" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "No se puede cargar artefacto '%(name)s'" msgid "Cannot remove last location in the image." msgstr "No se puede eliminar la última ubicación de la imagen." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "No se pueden guardar los datos para la imagen %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "No se puede definir ubicaciones como una lista vacía." msgid "Cannot specify 'max_size' explicitly" msgstr "No se puede especificar 'max_size' de manera explícita" msgid "Cannot specify 'min_size' explicitly" msgstr "No se puede especificar 'min_size' de manera explícita" msgid "Cannot upload to an unqueued image" msgstr "No se puede subir a una imagen que no está en cola" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "No se puede utilizar este parámetro con el operador %(op)s" msgid "Certificate file to use when starting API server securely." msgstr "" "Fichero de certificado a usar cuando se inicia servidor API con seguridad." 
#, python-format msgid "Certificate format not supported: %s" msgstr "Formato de certificado no soportado: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "El certificado no es válido después de: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "El certificado no es válido antes de: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Se ha encontrado un error en la verificación de la suma de comprobación. Se " "ha abortado el almacenamiento en memoria caché de la imagen '%s'." msgid "Client disconnected before sending all data to backend" msgstr "El cliente se desconecto antes de enviar todos los datos a backend" msgid "Command not found" msgstr "Comando no encontrado" msgid "Configuration option was not valid" msgstr "La opción de configuración no era válida " #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Solicitud incorrecta/error de conexión a servicio de autorización en el URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construido : %s" msgid "Container format is not specified." msgstr "No se especificó el formato de contenedor." msgid "Content-Type must be application/octet-stream" msgstr "El tipo de contenido debe ser aplicación/serie de octetos" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Descarga de imagen corrupta para imagen %(image_id)s " #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "No se ha podido enlazar con %(host)s:%(port)s después de intentarlo durante " "30 segundos" msgid "Could not find OVF file in OVA archive file." 
msgstr "No se ha podido encontrar el archivo OVF en el archivo archivador OVA" #, python-format msgid "Could not find metadata object %s" msgstr "No se pudo encontrar el objeto de metadatos %s" #, python-format msgid "Could not find metadata tag %s" msgstr "No se pudo encontrar la etiqueta de metadatos %s" #, python-format msgid "Could not find namespace %s" msgstr "No se ha podido encontrar el espacio de nombre %s" #, python-format msgid "Could not find property %s" msgstr "No se pudo encontrar propiedad %s" msgid "Could not find required configuration option" msgstr "No se ha podido encontrar la opción de configuración necesaria " #, python-format msgid "Could not find task %s" msgstr "No se encontró tarea %s" #, python-format msgid "Could not update image: %s" msgstr "No se ha podido actualizar la imagen: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Actualmente no se da soporte a los paquetes OVA que contengan múltiples " "discos." msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "La lista de verificador personalizada debe contener tuplas '(función, " "mensaje)'" #, python-format msgid "Data for image_id not found: %s" msgstr "No se encuentran los datos de image_id: %s" msgid "Data supplied was not valid." msgstr "Los datos proporcionados no son válidos. 
" msgid "Date and time of image member creation" msgstr "Fecha y hora de creación del miembro de la imagen" msgid "Date and time of image registration" msgstr "Fecha y hora del registro de la imagen" msgid "Date and time of last modification of image member" msgstr "Fecha y hora de la última modificación del miembro de la imagen" msgid "Date and time of namespace creation" msgstr "Fecha y hora de creación del espacio de nombre" msgid "Date and time of object creation" msgstr "Fecha y hora de creación del objeto" msgid "Date and time of resource type association" msgstr "Fecha y hora de asociación del tipo de recurso" msgid "Date and time of tag creation" msgstr "Fecha y hora de creación de la etiqueta" msgid "Date and time of the last image modification" msgstr "Fecha y hora de la última modificación de la imagen" msgid "Date and time of the last namespace modification" msgstr "Fecha y hora de la última modificación de espacio de nombre" msgid "Date and time of the last object modification" msgstr "Fecha y hora de la última modificación del objeto" msgid "Date and time of the last resource type association modification" msgstr "" "Fecha y hora de la última modificación de la asociación del tipo de recurso" msgid "Date and time of the last tag modification" msgstr "Fecha y hora de la última modificación de la etiqueta" msgid "Datetime when this resource was created" msgstr "Fecha en la cual se creó este recurso" msgid "Datetime when this resource was updated" msgstr "Fecha en la cual se actualizó este recurso" msgid "Datetime when this resource would be subject to removal" msgstr "Fecha en la cual este recurso estará sujeto a eliminación" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Valor predeterminado para el número de elementos devueltos por una solicitud " "si no se especifica de forma explícita en la solicitud" msgid "Default value is invalid" msgstr "El valor por defecto no es 
válido" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "Denegando intento de carga de artefacto porque excede la capacidad: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Denegando intento de carga de imagen porque excede la capacidad: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "Denegando intento de cargar una imagen mayor que %d bytes." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "Se debe eliminar primero la propiedad de dependencia '%s'" msgid "Dependency relations cannot be mutable" msgstr "Las relaciones de dependencia no se pueden permutar" msgid "Deploy the v1 OpenStack Images API." msgstr "Implemente la API de imágenes v1 de OpenStack. " msgid "Deploy the v1 OpenStack Registry API." msgstr "Implemente la API de registro v1 de OpenStack. " msgid "Deploy the v2 OpenStack Images API." msgstr "Despliegue la API de imágenes v2 de OpenStack. " msgid "Deploy the v2 OpenStack Registry API." msgstr "Despliegue la API de registro v2 de OpenStack. " msgid "Descriptive name for the image" msgstr "Nombre descriptivo para la imagen" msgid "Dictionary contains unexpected key(s)" msgstr "El diccionario contiene clave(s) inesperadas" msgid "Dictionary size is greater than maximum" msgstr "El tamaño del diccionario es mayor que el máximo" msgid "Dictionary size is less than minimum" msgstr "El tamaño del diccionario es menor que el mínimo" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "Algoritmo Digest que se utilizará para la firma digital. 
Utilice el comando " "\"openssl list-message-digest-algorithmsl\" para ver los algoritmos " "disponibles soportados por la version de OpenSSL en la plataforma. Por " "ejemplo, \"sha1\", \"sha256\", \"sha512\", etc." msgid "Disk format is not specified." msgstr "No se especificó el formato del disco." msgid "Does not match pattern" msgstr "No coincide el patrón" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "El controlador %(driver_name)s no se ha podido configurar correctamente. " "Razón: %(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Se debe especificar un fichero o legacy_image_id" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Error al descodificar la solicitud. La URL o el cuerpo solicitado contenían " "caracteres que se han podido descodificar en Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Error al captar los miembros de la imagen %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Error en la configuración de almacén. La adición de artefactos al almacén " "está inhabilitada." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Error en la configuración del almacén. Se ha inhabilitado la adición de " "imágenes a almacen." 
msgid "Error occurred while creating the verifier" msgstr "Se ha producido un error al crear el verificador" msgid "Error occurred while verifying the signature" msgstr "Se ha producido un error al verificar la firma" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Se esperaba un miembro con el formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Se esperaba un estado con el formato: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "El origen externo no puede estar vacío" #, python-format msgid "External sources are not supported: '%s'" msgstr "No se soportan fuentes externas: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Error al activar imagen. Se ha obtenido error: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Error al agregar metadatos de imagen. Se obtuvo error: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "No se pudo encontrar artefacto %(artifact_id)s para eliminar" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "No se pudo encontrar imagen %(image_id)s para eliminar" #, python-format msgid "Failed to find image to delete: %s" msgstr "No se ha encontrado la imagen para eliminar: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "No se encontró imagen para actualizar: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "No se encontró tipo de recurso %(resourcetype)s para eliminar" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "No se ha podido inicializar la base de datos de memoria caché de imagen. Se " "ha obtenido error: %s" #, python-format msgid "Failed to read %s from config" msgstr "No se ha podido leer %s de la configuración" #, python-format msgid "Failed to reserve image. 
Got error: %s" msgstr "Error al reservar imagen. Se ha obtenido error: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "" "No se han podido actualizar metadatos de imagen. Se ha obtenido error: %s" #, python-format msgid "Failed to upload image %s" msgstr "No se cargó imagen %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "No se permite cargar datos de imagen para imagen %(image_id)s a causa de un " "error HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Error al cargar datos de imagen para imagen %(image_id)s a causa de un error " "interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "El archivo %(path)s tiene un archivo de respaldo %(bfile)s no válido, " "terminando de forma anormal." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "No se permiten las importaciones basadas en ficheros. Por favor use una " "fuente no-local de datos de imagen." msgid "File too large" msgstr "El fichero es demasiado grande" msgid "File too small" msgstr "El fichero es demasiado pequeño" msgid "Forbidden image access" msgstr "Acceso prohibido a la imagen" #, python-format msgid "Forbidden to delete a %s image." msgstr "Se prohíbe eliminar una imagen %s." #, python-format msgid "Forbidden to delete image: %s" msgstr "Está prohibido eliminar imagen: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Prohibido modificar '%(key)s' de la imagen en estado %(status)s." #, python-format msgid "Forbidden to modify '%s' of image." msgstr "Prohibido modificar '%s' de la imagen." msgid "Forbidden to reserve image." msgstr "La reserva de imagen está prohibida." msgid "Forbidden to update deleted image." 
msgstr "La actualización de una imagen suprimida está prohibida." #, python-format msgid "Forbidden to update image: %s" msgstr "Se prohíbe actualizar imagen: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Intento de carga prohibido: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Solicitud no permitida, el espacio de nombre para la definición de metadatos=" "%s no es visible" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Solicitud no permitida, la tarea %s no es visible" msgid "Format of the container" msgstr "Formato del contenedor" msgid "Format of the disk" msgstr "Formato del disco" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "Error al obtener datos de objeto %(name)s : %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Error al obtener información de imagen %(id)s : %(err)s." msgid "Glance client not installed" msgstr "El cliente de Glance no está instalado" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" no es válido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host y puerto \"%s\" no es válido." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Solo se incluye mensaje informativo legible para humanos cuando sea " "apropiado (usualmente en error)" msgid "If False doesn't trace SQL requests." msgstr "Si Falce no rastrea solicitudes SQL." msgid "If False fully disable profiling feature." msgstr "Si es False inhabilita completamente la característica de perfilado." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. 
In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Si es False, el servidor devolverá la cabecera \"Conexión: cerrada\" en sus " "respuestas, si es True, el servidor devolverá la cabecera \"Conexión: Keep-" "Alive\". Para cerrar la conexión de pila del cliente explícitamente antes de " "enviar la respuesta y de que se la lea el cliente, simplemente debe " "configurar la opción False cuando cree un servidor wsgi." msgid "If true, image will not be deletable." msgstr "Si es true, la imagen no se podrá suprimir." msgid "If true, namespace will not be deletable." msgstr "Si es true, no se podrá eliminar el espacio de nombre." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "No se pudo eliminar imagen %(id)s porque está en uso: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "No se ha encontrado la imagen %(id)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "No se pudo encontrar imagen %(image_id)s después de subirla. Es posible que " "la imagen haya sido eliminada durante la carga: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "La imagen %(image_id)s está protegida y no se puede suprimir." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "No se pudo encontrar la imagen %s después de subirla. Es posible que la " "imagen haya sido eliminada durante la carga, limpiando los fragmentos " "cargados." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." 
msgstr "" "No se puede encontrar la imagen %s después de la carga. Es posible que la " "imagen se haya eliminado durante la carga." #, python-format msgid "Image %s is deactivated" msgstr "Se ha desactivado la imagen %s" #, python-format msgid "Image %s is not active" msgstr "La imagen %s no está activa" #, python-format msgid "Image %s not found." msgstr "No se encontró imagen %s." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "La imagen excede la capacidad de almacenamiento: %s" msgid "Image id is required." msgstr "Se necesita id de imagen" msgid "Image is protected" msgstr "La imagen está protegida " #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Se ha excedido el límite de miembro de imagen para imagen %(id)s: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "Nombre de imagen demasiado largo: %d" msgid "Image operation conflicts" msgstr "Conflictos de operación de imagen" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "No se permite la transición de estado %(cur_status)s a %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "El soporte de almacenamiento de imagen está lleno: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "" "Se ha excedido el límite de etiqueta de imagen para imagen %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problema al cargar la imagen: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "¡Ya existe una imagen con el identificador %s!" #, python-format msgid "Image with identifier %s has been deleted." msgstr "Se ha eliminado imagen identificada como %s." 
#, python-format msgid "Image with identifier %s not found" msgstr "No se ha encontrado la imagen con el identificador %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "No se ha podido encontrar la imagen con ID %(image_id)s" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Estrategia de autorización incorrecta, se esperaba \"%(expected)s\" pero se " "ha recibido \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Solicitud incorrecta: %s" msgid "Index is out of range" msgstr "El índice está fuera de alcance" msgid "Index is required" msgstr "Se necesita índexación" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "La entrada no contiene el campo '%(key)s'" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "" "Permisos insuficientes en el soporte de almacenamiento de artefacto: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Permisos insuficientes en el soporte de almacenamiento de imagen: %s " #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Tipo de contenido no valido para trabajar con %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Puntero JSON no válido para este recurso: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Formato de certificado no válido: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "Suma de verificación '%s': no puede exceder los 32 caracteres" msgid "Invalid configuration in glance-swift conf file." msgstr "Configuración en fichero en glance-swift no válida." msgid "Invalid configuration in property protection file." msgstr "Configuración en fichero de protección de propiedad no válida." #, python-format msgid "Invalid container format '%s' for image." 
msgstr "Formato de contenedor '%s' no válido para imagen." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo de contenido no válido %(content_type)s" msgid "Invalid dict property type" msgstr "Tipo de propiedad del diccionario no válido" msgid "Invalid dict property type specification" msgstr "Especificación de tipo de propiedad del diccionario no válida" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Formato de disco '%s' no válido para imagen." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valor de filtro no válido %s. No se han cerrado comillas." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valor de filtro no válido %s. No hay una coma antes de cerrar comillas." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "Valor de filtro no válido %s. No hay una coma antes de abrir comillas." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "Cabeceras no válidas de \"Tipo de contenido\": %s" msgid "Invalid image id format" msgstr "Formato de id de imagen no válido" msgid "Invalid item type specification" msgstr "Especificación de tipo del elemento no válida" #, python-format msgid "Invalid json body: %s" msgstr "Cuerpo de json no válido: %s" msgid "Invalid jsonpatch request" msgstr "Solicitud de jsonpatch no válida" msgid "Invalid location" msgstr "Ubicación no válida" #, python-format msgid "Invalid location %s" msgstr "Ubicación %s no válida" #, python-format msgid "Invalid location: %s" msgstr "Ubicaciones no válidas: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Opción location_strategy no válida: %(name)s. 
La opción(es) válida(s) es/" "son: %(strategies)s" msgid "Invalid locations" msgstr "Ubicaciones no válidas" #, python-format msgid "Invalid locations: %s" msgstr "Ubicaciones no válidas: %s" msgid "Invalid marker format" msgstr "Formato de marcador no válido" msgid "Invalid marker. Image could not be found." msgstr "Marcador no válido. No se ha podido encontrar la imagen. " #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "mask_gen_algorithm no válido: %s" #, python-format msgid "Invalid membership association: %s" msgstr "Asociación de pertenencia no válida: %s " msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Mezcla no válida de formatos de disco y contenedor. Al definir un formato de " "disco o de contenedor como 'aki', 'ari' o 'ami', los formatos de contenedor " "y de disco deben coincidir." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operación: `%(op)s` no válida. Debe ser una de las siguientes: %(available)s." msgid "Invalid position for adding a location." msgstr "Posición no válida para agregar ubicación." msgid "Invalid position for removing a location." msgstr "Posición no válida para eliminar ubicación." msgid "Invalid property definition" msgstr "Definición de propiedad no válida" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "pss_salt_length no válido: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Tipo de clave pública no válido para el tipo de clave de firma: %s" msgid "Invalid reference list specification" msgstr "Especificación de lista de referencia no válida" msgid "Invalid referenced type" msgstr "Tipo referenciado no váido" msgid "Invalid request PATCH for work with blob" msgstr "Solicitud PATCH no valida para trabajar con blob" msgid "Invalid service catalog json." 
msgstr "JSON de catálogo de servicios no válido." #, python-format msgid "Invalid signature hash method: %s" msgstr "Método hash de firma no válido: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Tipo de clave de firma no válido: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Dirección de ordenación no válida : %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Clave de ordenación no válida: %(sort_key)s. Si la versión de tipo no está " "configurada debe ser una de las siguientes: %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Clave de ordenación no válida: %(sort_key)s. Debe ser una de las siguientes: " "%(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Clave de ordenación no válida: %(sort_key)s. No puede ordenar por medio de " "esta propiedad" #, python-format msgid "Invalid status value: %s" msgstr "Valor de estado no válido: %s" #, python-format msgid "Invalid status: %s" msgstr "Estado no válido: %s" #, python-format msgid "Invalid time format for %s." msgstr "Formato de hora no válido para %s." msgid "Invalid type definition" msgstr "Definición de tipo no válida" #, python-format msgid "Invalid type value: %s" msgstr "Valor de tipo no válido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Actualización no válida. Como resultado será un espacio de nombre para la " "definición de metadatos duplicado con el mismo nombre de %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. 
El resultado será un objeto para la definición de " "metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. El resultado será un objeto para la definición de " "metadatos duplicado con el mismo nombre de=%(name)s en el espacio de nombre=" "%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Actualización no válida. El resultado será una propiedad para la definición " "de metadatos duplicada con el mismo nombre de=%(name)s en espacio de nombre=" "%(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Valor no válido'%(value)s' para parametro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valor no válido para opción %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valor de visibilidad no válido : %s" msgid "Is not allowed value" msgstr "No es un valor permitido" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Es posible que el módulo eventlet se haya importado previamente a " "configuración %s='yes'. Actualmente se necesita inhabilitar eventlet." "greendns si se está utilizando ipv6 ya que ahora eventlet.greendns se daña " "con direcciones ipv6. Por favor asegúrese de que eventlet no se importa " "antes de configurar esto. 
" msgid "It's invalid to provide multiple image sources." msgstr "Proporcionar múltiples fuentes para la imagen no es válido." msgid "It's not allowed to add locations if locations are invisible." msgstr "No se permite añadir ubicaciones si son invisibles." msgid "It's not allowed to remove locations if locations are invisible." msgstr "No se permite eliminar ubicaciones si son invisibles." msgid "It's not allowed to update locations if locations are invisible." msgstr "No se permite actualizar las ubicaciones si son invisibles." msgid "Items have to be unique" msgstr "Los elementos deben ser únicos" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." msgstr "" "La ruta Json debe iniciar con a '/', finalizar con no '/', no se permiten 2 " "'/' subsecuentes." msgid "Legacy image was not found" msgstr "No se encontró herencia de imagen" msgid "Length is greater than maximum" msgstr "El tamaño es mayor que el máximo" msgid "Length is less than minimum" msgstr "El tamaño es menor que el mínimo" msgid "Limit param must be an integer" msgstr "el parámetro de límite debe ser un entero" msgid "Limit param must be positive" msgstr "el parámetro de límite debe ser positivo" #, python-format msgid "Limit param must not be higher than %d" msgstr "el parámetro de límite no debe ser mayor que %d" msgid "Limits request ID length." msgstr "Los límites solicitan longitud de ID." msgid "List definitions may hot have defaults" msgstr "Es probable que las definiciones de lista no tengan predeterminados" msgid "List of strings related to the image" msgstr "Lista de series relacionadas con la imagen" msgid "List size is greater than maximum" msgstr "El tamaño de la lista es mayor que el máximo" msgid "List size is less than minimum" msgstr "El tamaño de lista es menor que el mínimo" msgid "Loop time between checking for new items to schedule for delete." 
msgstr "" "Tiempo de bucle entre verificación para nuevos elementos para la " "planificación de eliminación." #, python-format msgid "Malformed Content-Range header: %s" msgstr "Cabecera de Rango de Contenido incorrecta: %s" msgid "Malformed JSON in request body." msgstr "JSON con formato incorrecto en el cuerpo de la solicitud." msgid "Max string length may not exceed 255 characters" msgstr "El tamaño máximo de serie no debe exceder 255 caracteres" msgid "Maximal age is count of days since epoch." msgstr "La edad máxima es el recuento de días desde epoch." msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "El tamaño máximo de línea de cabecera de mensaje que se aceptará. Es " "probable que se necesite aumentar max_header_line al usar tokens grandes " "(generalmente los que Keystone v3 API genera con grandes catálogos de " "servicio" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de miembros de imagen por imagen. Los valores negativos se " "evalúan de manera ilimitada." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de ubicaciones permitidas en una imagen. Los valores negativos " "se evalúan de manera ilimitada." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Número máximo de propiedades permitidas en una imagen. Los valores negativos " "se evalúan de manera ilimitada." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de etiquetas permitidas en una imagen. Los valores negativos " "se evalúan de manera ilimitada." 
msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "El número máximo permitido de elementos que podrían ser devueltos por una " "solicitud" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Se ha superado el máximo de redirecciones (%(redirects)s)." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "El tamaño máximo que un usuario puede cargar en bytes. Por defecto hasta " "1099511627776 bytes (1 TB).ADVERTENCIA: este valor solo se debe aumentar " "luego de considerarlo cuidadosamente y debe configurarse en un valor menor a " "8 EB (9223372036854775808)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Se ha duplicado miembro %(member_id)s para imagen %(image_id)s" msgid "Member can't be empty" msgstr "Miembro no puede estar vacío" msgid "Member to be added not specified" msgstr "No se ha especificado el miembro que añadir" msgid "Membership could not be found." msgstr "La pertenencia no se ha podido encontrar." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "El espacio de nombre %(namespace)s de definición de metadatos está " "protegido y no puede eliminarse." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "No se encontró espacio de nombre para la definición de metadatos para id=%s" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "No se encontró el espacio de nombre para definición de metadatos=" "%(namespace_name)s." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." 
msgstr "" "El objeto %(object_name)s de definición de metadatos está protegido y no " "puede eliminarse." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "No se encontró el objeto para la definición de metadatos para id=%s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La propiedad %(property_name)s de definición de metadatos está protegida y " "no puede eliminarse." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "No se encontró propiedad para la definición de metadatos para id=%s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "El tipo de recurso para la definición de metadatos %(resource_type_name)s es " "un tipo de sistema seeded y no puede eliminarse." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "La asociación de tipo de recurso %(resource_type)s de definición de " "metadatos está protegida y no puede eliminarse." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Etiqueta de definición de metadatos %(tag_name)s está protegida y no puede " "eliminarse." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "No se encontró etiqueta para la definición de metadatos para id=%s" msgid "Min string length may not be negative" msgstr "El tamaño mínimo de serie no puede ser negativo" msgid "Minimal rows limit is 1." msgstr "El número mínimo de filas es." #, python-format msgid "Missing required credential: %(required)s" msgstr "Falta la credencial necesaria :%(required)s " #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." 
msgstr "" "Varias coincidencias de servicio 'image' para la región %(region)s. Esto " "generalmente significa que es necesaria una región y que no se ha " "proporcionado ninguna." msgid "Must supply a positive, non-zero value for age." msgstr "Debe proporcionar un valor positivo distinto de cero para la edad." msgid "Name of the paste configuration file." msgstr "Nombre del fichero de configuración de pegado." #, python-format msgid "No artifact found with ID %s" msgstr "No se encontró artefacto con ID %s" msgid "No authenticated user" msgstr "Ningún usuario autenticado " #, python-format msgid "No image found with ID %s" msgstr "No se encontró imagen con ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "No se encontró ubicación con ID %(loc)s de imagen %(img)s" msgid "No permission to share that image" msgstr "No existe permiso para compartir esa imagen" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "No se ha cargado complemento para '%(name)s'" msgid "No property to access" msgstr "No existe propiedad a la cual acceder" #, python-format msgid "No such key '%s' in a dict" msgstr "No hay tal clave '%s' en un dict " #, python-format msgid "Not a blob property '%s'" msgstr "'%s' no es una propiedad de objeto" msgid "Not a downloadable entity" msgstr "No es una entidad descargable" msgid "Not a list property" msgstr "No es una propiedad de lista" #, python-format msgid "Not a list property '%s'" msgstr "'%s' no es una propiedad de lista" msgid "Not a valid value type" msgstr "Tipo de valor no válido" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "No todas las dependencias de encuentran en estado '%s'" #, python-format msgid "Not allowed to create members for image %s." msgstr "No se permite crear miembros para imagen %s." 
#, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "No está permitido eliminar imagen en estado '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "No se permite eliminar miembros para imagen %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "No se permite eliminar etiquetas para imagen %s." #, python-format msgid "Not allowed to list members for image %s." msgstr "No se permite listar miembros para imagen %s." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "No está permitido reactivar imagen en estado'%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "No se permite actualizar miembros para imagen %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "No se permite actualizar etiquetas para imagen %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "No se permite cargar datos de imagen para imagen %(image_id)s: %(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "'%s' no es un índice matriz" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "El número de dirs de ordenación no coincide con el número de claves de " "ordenación" msgid "OVA extract is limited to admin" msgstr "La extracción de OVA está limitada al administrador" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "No se han especificado los metadatos OVF de interés en el achivo de " "configuración ovf-metadata.json. Defina \"cim_pasd\" en una lista de " "propiedades CIM_ProcessorAllocationSettingData interesadas." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "No se ha encontrado el archivo de configuración de propiedades OVF \"ovf-" "metadata.json\"." 
msgid "Old and new sorting syntax cannot be combined" msgstr "No se puede combinar la antigua y nueva sintaxis de ordenación" msgid "Only list indexes are allowed for blob lists" msgstr "Solo se permiten índices de lista para listas de objeto" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "La operación \"%s\" requiere un miembro llamado \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Los objetos de operación pueden contener exactamente un miembro llamado \"add" "\", \"remove\" o \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Los objetos de operación solo pueden contener un miembro llamado \"add\", " "\"remove\" o \"replace\"." msgid "Operations must be JSON objects." msgstr "Las operaciones deben ser objetos JSON." #, python-format msgid "Operator %(op)s is not supported" msgstr "No se soporta operador %(op)s " #, python-format msgid "Original locations is not empty: %s" msgstr "Las ubicaciones originales no están vacías: %s" msgid "Owner can't be updated by non admin." msgstr "Un usuario no admin no puede actualizar al propietario." msgid "Owner must be specified to create a tag." msgstr "Se debe especificar el propietario para crear etiqueta." msgid "Owner of the image" msgstr "Propietario de la imagen" msgid "Owner of the namespace." msgstr "Propietario del espacio de nombre." msgid "Param values can't contain 4 byte unicode." msgstr "Los valores de parámetro no pueden contener 4 byte unicode." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Se eliminó el nombre parcial de una segmentación en el fichero de " "configuración de pegado con el nombre de servicio. 
Por ejemplo, si pega la " "sección de nombre [pipeline:glance-api-keystone] utilice el valor \"keystone" "\"" msgid "Path to the directory where json metadata files are stored" msgstr "" "Ruta al directorio en donde se almacenan los ficheros de metadatos json" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "El nombre de complemento '%(plugin)s' debe coincidir con el nombre de tipo " "de artefacto %(name)s'" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "El puntero `%s` contiene un \"~\" que no forma parte de una secuencia de " "escape reconocida." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "El puntero `%s` contiene adyacente \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "El puntero `%s` contiene un token no válido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "El puntero `%s` no empieza por \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "El puntero `%s` termina en \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "El puntero contiene un \"~\" que no forma parte de una secuencia de escape " "reconocida [~0, ~1]." #, python-format msgid "Port \"%s\" is not valid." msgstr "Puerto \"%s\" no es válido." msgid "Port the registry server is listening on." msgstr "EL puerto en el cual el servidor de registro escucha." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "" "El componente numérico de la versión anterior es demasiado largo (%d " "caracteres máx)" msgid "Private key file to use when starting API server securely." msgstr "" "Fichero de clave privado a usar cuando de inicia servidor API con seguridad." 
#, python-format msgid "Process %d not running" msgstr "No se está ejecutando proceso %d" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "Las propiedades %s deben definirse antes de guardar datos." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "La propiedad %(property_name)s no inicia con el prefijo de asociación del " "tipo de recurso esperado de '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "La propiedad %s ya está presente." #, python-format msgid "Property %s does not exist." msgstr "La propiedad %s no existe." #, python-format msgid "Property %s may not be removed." msgstr "La propiedad %s no se puede eliminar." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La propiedad %s debe definirse antes de guardar datos." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "" "Es probable que la propiedad '%(name)s' no tenga valor '%(val)s': %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "Propiedad '%s' está protegida" msgid "Property names can't contain 4 byte unicode." msgstr "Los nombre de propiedad no pueden contener 4 byte unicode." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "No se encontró protección de propiedad en operación %(operation)s para " "regla %(rule)s. Ningún rol permitirá realizar esta operación." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "No se ha encontrado valor de la propiedad %(prop)s " #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "El tamaño de imagen proporcionado debe coincidir con el tamaño de la imagen " "almacenada. 
(tamaño proporcionado: %(ps)d, tamaño almacenado: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" "El objeto proporcionado no coincide con el esquema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "No se soporta el estado de tarea proporcionado: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "No se soporta el tipo de tarea proporcionado: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Proporciona una descripción sencilla del espacio de nombre." msgid "Public images do not have members." msgstr "Las imágenes públicas no tienen miembros." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "URL pública para usar para versiones endpoint. Por defecto es Ninguno, el " "cual utilizará el atributo host_url de la solicitud para poblar la base URL. " "Si Glance está operando tras un proxy, es posible que quiera cambiarla para " "representar la URL de proxy." msgid "Python module path of data access API" msgstr "La vía de acceso del módulo Python de la API de acceso a datos" msgid "Received invalid HTTP redirect." msgstr "Se ha recibido redirección HTTP no válida. " #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirigiendo a %(uri)s para la autorización. " #, python-format msgid "Registry service can't use %s" msgstr "El servicio de registro no puede usar %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "El registro no se ha configurado correctamente en el servidor de API. 
Razón: " "%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "Es probable que la relación %(name)s no contenga valores múltiples" #, python-format msgid "Reload of %(serv)s not supported" msgstr "No se soporta la recarga de %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Recargando %(serv)s (pid %(pid)s) con señal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" msgid "Request body must be a JSON array of operation objects." msgstr "" "El cuerpo de la solicitud debe ser una matriz JSON de objetos de operación." msgid "Request must be a list of commands" msgstr "La solicitud debe ser una lista de comandos." msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "No existen las propiedades de imagen necesarias para la verificación de " "firma. No se puede verificar la firma." #, python-format msgid "Required store %s is invalid" msgstr "El almacén %s solicitado no es válido" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "Los nombres de tipo de recurso beben alinearse con los tipos de recurso Heat " "en cualquier momento: http://docs.openstack.org/developer/heat/" "template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La respuesta de Keystone no contiene un punto final Glance." msgid "Role used to identify an authenticated user as administrator." msgstr "El rol identificaba antes el usuario autenticado como administrador." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." 
msgstr "" "Ejecución como un proceso de larga duración. Cuando no se especifique (por " "defecto), ejecuta la operación de depuración una vez y luego sale. Cuando se " "especifique no salga y ejecute depurador en intervalo wakeup_time como se " "especifica en la configuración." msgid "Scope of image accessibility" msgstr "Ámbito de accesibilidad de la imagen" msgid "Scope of namespace accessibility." msgstr "Alcance de accesibilidad del espacio de nombre." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Clave secreta a utilizar para firmar los mensajes de rastreo de los " "servicios de la API y el Registro de Glance." #, python-format msgid "Server %(serv)s is stopped" msgstr "El servidor %(serv)s se ha detenido" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "" "Se ha encontrado un error en la creación del trabajador de servidor: " "%(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Establezca una capacidad amplia del sistema para cada usuario. Este valor es " "la capacidad total que un usuario puede utilizar a lo largo de todos los " "sistemas de almacenamiento. Un valor de ero significa ilimitado. Se puede " "especificar una unidad opcional para el valor. La unidades que se aceptan " "son B, KB, MB, GB y TB representando respectivamente Bytes, KiloBytes, " "MegaBytes, GigaBytes y TeraBytes. Si no se espceifica unidad entonces se " "asume que son Bytes. 
Tenga en cuenta que no debe haber un espacio entre " "valor y unidad y las unidades distinguen mayúsculas." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "No se soporta el nivel de vista %(shl)s en esta operación" msgid "Signature verification failed" msgstr "Ha fallado la verificación de firma" msgid "Signature verification failed." msgstr "Ha fallado la verificación de firma." msgid "Size of image file in bytes" msgstr "Tamaño del archivo de imagen en bytes" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Algunos tipos de recurso aceptan más de una clave / par de valor por " "instancia. Por ejemplo, Cinder permite metadatos de usuario e imagen en " "volúmenes. Nova solo evalúa los metadatos de propiedades de imagen " "(planeadores y controladores). Esta propiedad permite un espacio de nombre " "para eliminar la ambigüedad." msgid "Sort direction supplied was not valid." msgstr "La dirección de ordenación proporcionada no es válida." msgid "Sort key supplied was not valid." msgstr "La clave de clasificación proporcionada no es válida. " msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Especifica el prefijo que se usará para el tipo de recurso dado. Cualquier " "propiedad en el espacio de nombre deben tener este prefijo cuando se aplica " "al tipo de recurso especificado. Debe incluir separador de prefijo(por " "ejemplo un punto :)." msgid "Specifies which task executor to be used to run the task scripts." 
msgstr "" "Especifica cual ejecutor de tarea se usará para ejecutar los scripts de " "tarea." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "el estado debe ser \"pending\", \"accepted\" o \"rejected\"." msgid "Status not specified" msgstr "Estado no especificado" msgid "Status of the image" msgstr "Estado de la imaen" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "No se permite la transición de %(cur_status)s a %(new_status)s" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Deteniendo %(serv)s (pid %(pid)s) con señal (%(sig)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "No se ha encontrado el almacenamiento para image_id: %s" #, python-format msgid "Store for scheme %s not found" msgstr "El almacén para el esquema %s no se ha encontrado" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "%(attr)s (%(supplied)s) y %(attr)s proporcionados que se han generado desde " "la imagen cargada (%(actual)s) no coinciden. Definiendo estado de imagen " "como 'killed'." msgid "Supported values for the 'container_format' image attribute" msgstr "Valores para el atributo de imagen 'container_format' soportados" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valores para el atributo de imagen 'disk_format' soportados" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Se suprimió respawn como %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Se ha recibido señal de sistema SIGHUP." 
#, python-format msgid "Task '%s' is required" msgstr "Se necesita tarea '%s'" msgid "Task does not exist" msgstr "La tarea no existe" msgid "Task failed due to Internal Error" msgstr "La tarea ha fallado a causa de un Error Interno" msgid "Task was not configured properly" msgstr "La tarea no se configuró correctamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "No se encontró tarea con id %(task_id)s proporcionado" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "El filtro \"changes-since\" ya no está disponible en v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "El archivo CA %s que ha especificado no existe" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "El objeto de imagen %(image_id)s que la tarea %(task_id)s está creando, ya " "no tiene un estado válido para un próximo procesamiento. " msgid "The Store URI was malformed." msgstr "El URI del almacén tenía un formato incorrecto." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "La URL al servicio de keystone. Si \"use_user_token\" no tiene efecto y usa " "keystone auth, entonces se puede especificar la URL de keystone." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "" "La dirección en donde el servicio de autenticación Swift es escuchado " "(rechazado)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "La contraseña de los administradores. Si \"use_user_token\" no tiene " "efecto, entonces se puede especificar las credenciales del administrador." msgid "" "The administrators user name. 
If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "El nombre de usuario del administrador. Si \"use_user_token\" no tiene " "efecto, entonces se pueden especificar las credenciales del administrador." msgid "The amount of time in seconds to delay before performing a delete." msgstr "" "El lapso e tiempo en segundos en demora antes de realizar una eliminación." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "El periodo de tiempo para dejar que una imagen incompleta permanezca en la " "memoria caché antes de que el podador de caché, si se está ejecutando, " "comience a limpiar el caché de imágenes." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "El valor acumulado que se utilizará al crear la pila TCP de escucha." #, python-format msgid "The cert file you specified %s does not exist" msgstr "El archivo de certificado que ha especificado %s no existe" msgid "The config file that has the swift account(s)configs." msgstr "" "El fichero de configuración que tiene las configuraciones de cuenta(s) swift." msgid "The current status of this task" msgstr "El estado actual de esta tarea" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "El dispositivo que aloja el directorio de caché de imágenes " "%(image_cache_dir)s no soporta xattr. Es probable que tenga que editar fstab " "y añadir la poción user_xattr en la línea adecuada para que el dispositivo " "aloje el directorio de caché." msgid "The driver to use for image cache management." msgstr "" "El controlador que se usará para la administración del caché de imagen." 
#, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "El formato de la versión %s no es válido. Utilice la notación semver" msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "El formato al cual se convertirán automaticamente las imágenes. Cuando se " "utilice el programa de fondo RBD, este valor se debe definir a 'crudo'" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "El uri proporcionado no es válido. Por favor especifique un uri válido de la " "siguiente lista de uri soportados %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" "El nombre de host o la IP del proceso pydev que escucha las conexiones de " "depuración" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "La imagen %s ya se encuentra en el esclavo, pero no se encontró validación " "para esta. Esto indica que no tenemos permiso para ver todas las imágenes en " "el servidor esclavo." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "El artefacto entrante es demasiado grande: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "La imagen de entrada es demasiado grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "El archivo de claves que ha especificado %s no existe" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para ubicaciones de imagen. 
" "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número de miembros de imagen para esta " "imagen. Intentos: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para propiedades de imagen. " "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "Se ha excedido el límite en el número de propiedades de imagen permitidas. " "Intentos: %(num)s, Máximo: %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Se ha excedido el límite en el número permitido para etiquetas de imagen. " "Intento: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Ya existe la ubicación %(location)s" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Los datos de ubicación contienen un ID no válido: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "La ubicación del fichero de protección de propiedad. Este fichero contiene " "las normas para las protecciones de propiedad y roles/políticas asociadas a " "éste. 
Si no se especifica este valor de configuración, por defecto, las " "protecciones de propiedad no tendrán efecto. Si se especifica un valor y no " "se encuentra el fichero, entonces el servicio glance-api no iniciará." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "No se borró la definición de metadatos%(record_type)s de nombre=" "%(record_name)s- Otros archivos aún se refieren a ésta." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Ya existe el espacio de nombre para definición de metadatos=" "%(namespace_name)s" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "No se encontró el objeto para definición de metadatos de nombre=" "%(object_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "No se encontró la propiedad para definición de metadatos de nombre=" "%(property_name)s en espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Ya existe la asociación de tipo de recurso del tipo de recurso=" "%(resource_type_name)s para el espacio de nombre=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "No se encontró la asociación de tipo de recurso del tipo de recurso para " "definición de metadatos=%(resource_type_name)s para el espacio de nombre=" "%(namespace_name)s." 
#, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "No se encontró el tipo de recurso para definición de metadatos de nombre=" "%(resource_type_name)s" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "No se encontró la etiqueta para definición de metadatos de nombre=%(name)s " "en el espacio de nombre=%(namespace_name)s." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "El modo en el cual se ejecutará el motor. Puede ser 'serial' o 'paralelo'" msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "El número de trabajadores en procesos hijo que se creará para solicitudes de " "servicio. El número por defecto será igual al de CPUs disponibles." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "La cantidad de actividades paralelas que el motor ejecuta al mismo tiempo. " "El valor puede ser mayor a uno cuando el motor está en modo 'paralelo'." msgid "The parameters required by task, JSON blob" msgstr "Los parámetros requeridos por tarea, objeto JSON" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "La ruta al fichero de certificado para usar en conexiones SSL al servidor de " "registro, si existe. Alternativamente, puede establecer la variable de " "ambiente GLANCE_CLIENT_CERT_FILE a una ruta de fichero del fichero de " "certificado CA" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. 
Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "La ruta al fichero de certificado de la autoridad que certifica para usar en " "conexiones SSL al servidor de registro, si existe. Alternativamente, puede " "establecer la variable de ambiente GLANCE_CLIENT_CA_FILE a una ruta de " "fichero del fichero de certificado CA" msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "La ruta al fichero de clave para usar conexiones SSL al servidor de " "registro, si lo hay. Alternativamente, puede establecer la variable de " "ambiente GLANCE_CLIENT_KEY_FILE a una ruta de fichero del fichero de clave" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "La ruta a la base de datos de fichero de SQLite que se usará para la " "administración del caché de imagen." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "El periodo de tiempo, en segundos, que el servidor API esperará para que se " "complete una solicitud de registro. Un valor de 0 implica que no hay tiempo " "de espera." msgid "The port on which a pydev process is listening for connections." msgstr "El puerto en el que un proceso pydev escucha las conexiones." msgid "The port on which the server will listen." msgstr "El puerto en el cual el servidor escuchará." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "El protocolo a usar para la comunicación con el servidor de registro. Http o " "https." 
#, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" "El cuerpo %(body)s proporcionado no es válido bajo el esquema : %(schema)s" msgid "The provided image is too large." msgstr "La imagen proporcionada es demasiado grande." #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "La ruta '%(path)s' proporcioanda no es válida: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "La referencia para los parámetros de cuenta swift/almacén de respaldo por " "defecto utilizada para agregar nuevas imágenes." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "La región para el servicio de autenticación. Si \"use_user_token\" no tiene " "efecto y utiliza keystone auth, entonces se puede especificar el nombre de " "la región." msgid "The request returned 500 Internal Server Error." msgstr "La solicitud ha devuelto el mensaje 500 Error interno del servidor." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La solicitud ha devuelto un error 503 Servicio no disponible. Esto sucede " "generalmente por una sobrecarga del servicio o una interrupción transitoria." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un 302 Múltiples opciones. Generalmente esto " "significa que no se ha incluido un indicador de versión en un URI de " "solicitud.\n" "\n" "El cuerpo de la respuesta devuelta:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. 
This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un error 413 Entidad de solicitud demasiado grande. " "Esto generalmente significa que se ha infringido el límite de índice o un " "umbral de cuota.\n" "\n" "El cuerpo de la respuesta:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La solicitud ha devuelto un estado inesperado: %(status)s.\n" "\n" "El cuerpo de la respuesta:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "Se ha desactivado la imagen solicitada. Se prohíbe la descarga de datos de " "imagen." msgid "The result of current task, JSON blob" msgstr "El resultado de la tarea, objeto JSON actual" msgid "The signature data was not properly encoded using base64" msgstr "Los datos de firma no se han codificado correctamente con base64" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "El tamaño de los datos %(image_size)s excederá el límite. Quedan " "%(remaining)s bytes" msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "Tamaño de agrupación de hebras que se va a utilizar para depurar imágenes. " "El valor predeterminado es uno, que signnifica depuración serie. Cualquier " "valor por encima de uno incida el número máximo de imágenes que se pueden " "depurar en paralelo." 
#, python-format msgid "The specified member %s could not be found" msgstr "No se pudo encontrar el miembro %s especificado" #, python-format msgid "The specified metadata object %s could not be found" msgstr "No se pudo encontrar el objeto de metadatos %s especificado" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "No se pudo encontrar la etiqueta de metadatos %s especificada" #, python-format msgid "The specified namespace %s could not be found" msgstr "No se ha podido encontrar el espacio de nombre %s especificado" #, python-format msgid "The specified property %s could not be found" msgstr "No se pudo encontrar la propiedad %s especificada" #, python-format msgid "The specified resource type %s could not be found " msgstr "No se pudo encontrar el tipo de recurso %s especificado" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "El estado de la ubicación de la imagen eliminada solo se puede establecer " "como 'pending_delete' o 'deleted'." msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "El estado de la ubicación de imagen eliminada solo se puede establecer como " "'pending_delete' o 'deleted'." msgid "The status of this image member" msgstr "El estado de este miembro de la imagen" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Los nombres de almacén a usar para obtener orden de preferencia de " "almacenamiento. El nombre se debe registrar por uno de los almacenes " "definidos por la opción de configuración 'stores'. 
Esta opción se aplicará " "cuando se use la opción 'store_type' como estrategia de ubicación de imagen " "definida por la opción de configuración 'location_strategy'." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "La estrategia a usar para la autenticación. SI \"use_user_token\" no tiene " "efecto, entonces, se puede especificar la estrategia auth." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "El miembro meta %(member_id)s ya está asociado con la imagen %(image_id)s." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "El nombre global del usuario de administrador. Si \"use_user_token\" no " "tiene efecto, entonces se puede especificar el nombre global del " "administrador." msgid "The type of task represented by this content" msgstr "El tipo de tarea representada por este contenido" msgid "The unique namespace text." msgstr "EL único texto de espacio de nombre." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "El límite superior (el tamaño máximo de caché acumulado en bytes) más allá " "del cual el podador de caché, si se está ejecutando, comienza a limpiar el " "caché de imágenes." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "El nombre fácil de usar para el espacio de nombre. Utilizado por UI si está " "disponible." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "El usuario para autenticar con relación al servicio de autenticación Swift " "(rechazar)" msgid "" "The value for the socket option TCP_KEEPIDLE. 
This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "El valor de la opción de pila TCP_KEEPIDLE. Es el tiempo en segundos que " "debe estar la conexión inactiva antes de que TCP comience a enviar sondas " "keepalive." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "Hay un problema con %(error_key_name)s %(error_filename)s. Por favor " "verifique. Error: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Hay un problema con %(error_key_name)s %(error_filename)s. Por favor " "verifique. Error OpenSSL: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Hay un problema con el par de claves. Por favor verifique que el certificado " "%(cert_file)s y clave %(key_file)s deben estar juntas. Error OpenSSL %(ce)s" msgid "There was an error configuring the client." msgstr "Se ha producido un error al configurar el cliente. " msgid "There was an error connecting to a server" msgstr "Se ha producido un error al conectar a un servidor " msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Este valor de configuración indica si se usan los \"roles\" o \"políticas\" " "en el fichero de protección de propiedad." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Actualmente no se permite esta operación en las tareas Glance. Se eliminarán " "automáticamente después de alcanzar el tiempo con base en expires_at " "property." msgid "This operation is currently not permitted on Glance images details." 
msgstr "" "Actualmente no se permite la operación en los detalles de imagen de Glance." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Este valor establece la estrategia que se empleará para determinar el orden " "de ubicación de la imagen. Actualmente hay 2 estrategias empaquetadas con " "Glance 'location_order' y 'store_type'." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Tiempo de vida en horas para la tarea, así tenga éxito o fracase" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Tiempo de espera para las operaciones de conexión de pila del cliente. Si " "una conexión entrante está inactiva para este número de segundos se cerrará. " "Un valor de '0' significa esperar permanentemente." msgid "Too few arguments." msgstr "Muy pocos argumentos." msgid "Too few locations" msgstr "Hay muy pocas ubicaciones" msgid "Too many locations" msgstr "Hay demasiadas ubicaciones" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "" "El tamaño total de los bytes es %(size)d a lo largo de las imágenes " "%(img_count)d " msgid "Turn on/off delayed delete." msgstr "Encender/apagar eliminación retrasada." msgid "Type version has to be a valid semver string" msgstr "La versión del tipo debe se una secuencia semver válida" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "El URI no puede contener más de una aparición de un esquema. 
Si ha " "especificado un URI como swift://user:pass@http://authurl.com/v1/container/" "obj, tiene que cambiarlo para que utilice el esquema swift+http://, como: " "swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "" "La URL para acceder al archivo de imagen se encuentra en un almacén externo" msgid "Unable to PUT to non-empty collection" msgstr "No es posible COLOCAR a una colección no vacía" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "No se puede crear fichero pid %(pid)s. ¿Ejecutar como non-root?\n" "Retrocediendo a fichero temporal, puede detener el uso de servicio " "%(service)s:\n" " %(file)s %(server)s detener--fichero-pid %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "No se puede filtrar con el operador desconocido '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "No se ha podido filtrar en un rango con un valor no numérico." msgid "Unable to filter on a unknown operator." msgstr "No se puede filtrar con un operador desconocido." msgid "Unable to filter using the specified operator." msgstr "No se ha podido filtrar utilizando el operador especificado." msgid "Unable to filter using the specified range." msgstr "No se ha podido filtrar mediante el rango especificado." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "No se ha podido encontrar '%s' en el cambio del esquema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "No es posible encontrar `op` en cambio de JSON Schema. Debe ser uno de los " "siguientes: %(available)s. 
" msgid "Unable to get legacy image" msgstr "No se logró obtener herencia de imagen" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "No se puede aumentar el límite de descripción de fichero ¿Desea ejecutar " "como non-root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "No se ha podido cargar %(app_name)s desde el archivo de configuración " "%(conf_file)s.\n" "Se ha obtenido: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "No se ha podido cargar el esquema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "No se puede ubicar el fichero de configuración de pegado para %s." msgid "Unable to modify collection in immutable or readonly property" msgstr "" "No se puede modificar recolección en una propiedad no permutable o de solo " "lectura" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "No se ha podido recuperar el certificado con el ID: %s" msgid "Unable to retrieve request id from context" msgstr "No se ha podido recuperar el ID de solicitud del contexto" msgid "Unable to specify artifact type explicitly" msgstr "No se puede especificar el tipo de artefacto de manera explícita" msgid "Unable to specify artifact type version explicitly" msgstr "" "No se puede especificar la versión del tipo de artefacto de manera explícita" msgid "Unable to specify version if multiple types are possible" msgstr "No es posible especificar la versión si se pueden múltiples tipos" msgid "Unable to specify version if type is not specified" msgstr "No es posible especificar la versión si no se especifica el tipo" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "No se puede cargar datos de imagen duplicada %(image_id)s: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "No se 
puede verificar la firma porque el algoritmo no está soportado en este " "sistema" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "No se puede verificar la firma: %(reason)s" msgid "Unauthorized image access" msgstr "Acceso a imagen no autorizado" msgid "Unexpected body type. Expected list/dict." msgstr "Tipo de cuerpo inesperado. Se esperaba list/dict." #, python-format msgid "Unexpected response: %s" msgstr "Respuesta inesperada : %s " #, python-format msgid "Unknown auth strategy '%s'" msgstr "Estrategia de autenticación desconocida '%s' " #, python-format msgid "Unknown command: %s" msgstr "Comando desconocido %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versión de borrador de esquema JSON no reconocida" msgid "Unrecognized changes-since value" msgstr "Valor de changes-since no reconocido" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "sort_dir no soportado. Valores aceptables: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "sort_key no soportado. 
Valores aceptables: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "Valor %(value)d fuera de rango, no debe exceder %(max)d" msgid "Value is greater than maximum" msgstr "El valor es mayor que el máximo" msgid "Value is less than minimum" msgstr "El valor es menor que el mínimo" msgid "Value is required" msgstr "Se necesita valor" #, python-format msgid "Version component is too large (%d max)" msgstr "EL componente de versión es demasiado largo %d máx)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "Versión no válida: %(reason)s" msgid "Virtual size of image in bytes" msgstr "Tamaño virtual de la imagen en bytes" msgid "Visibility must be either \"public\" or \"private\"" msgstr "La visibilidad debe ser \"pública\" o \"privada\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Se esperó 15 segundos para que pid %(pid)s (%(file)s) muriera; desistiendo" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "SI es false, ningún artefacto se puede cargar sin importar los " "available_plugins. Si es true, se pueden cargar los artefactos" msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Al ejecutar el servidor en modalidad SSL, debe especificar un valor para las " "opciones cert_file y key_file en el archivo de configuración" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Cuando aplica, esta opción establece al propietario de una imagen como " "usuario. De otro modo, el propietario de la imagen será el usuario " "autenticado que emite la solicitud." 
msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "No se requiere validación vía una autoridad que certifica si se usa SSL en " "conexiones al servidor de registro. Este es el equivalente del registro a " "especificar --inseguro en la línea de comando usando glanceclient para el " "API." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Si se permite que los usuarios especifiquen las propiedades de la imagen más " "allá de lo que proporciona el esquema de la imagen" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "Si se deben incluir las ubicaciones de imágenes back-end en las propiedades " "de imagen. Por ejemplo, si se utiliza el almacén dels sistema de archivos, " "se devolverá una URL de tipo \"file:///path/to/image\" al usuario en el " "campo de metadatos 'direct_url'. Revelar la ubicación de almacenamiento " "puede suponer un riesgo de seguridad, por lo que debe utilizar este " "parámetro con cuidado! Si lo establece en true, se sobrescribe la opción " "show_image_direct_url." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "Si incluir o no la ubicación de almacenamiento de la imagen del programa de " "fondo en las propiedades de la imagen. 
Revelar la ubicación de " "almacenamiento puede ser un riesgo de seguridad, así que este valor debe " "utilizarse con precaución." msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Si se debe o no pasar a través de las cabeceras que contienen usuario e " "información de tenencia cuando se hacen solicitudes al registro. Esto " "permite que el registro use el contexto middleware sin el auth_token " "middleware de keystonemiddleware, eliminando llamadas al servicio de " "keystone auth. Se recomienda que cuando se utilice esta opción, se garantice " "que la comunicación segura entre glance api y el registro glance por medio " "de un middleware diferente a auth_token." msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Si se debe o no pasar a través del token del usuario cuando se hacen " "solicitudes al registro. Para prevenir fallas con la expiración del token " "durante la carga de ficheros grandes, se recomienda configurar este " "parámetro en False. Si \"use_user_token\" no tiene efecto, entonces se " "pueden especificar credenciales de administración." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. 
When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Directorio de trabajo para operaciones de tarea asincrónica. El directorio " "configurado aquí se usará para operar sobre imágenes - normalmente antes de " "que sean importadas en el almacén de destino. Si proporciona un directorio " "de trabajo, asegúrese de que se está proporcionado suficiente espacio para " "que las tareas concurrentes se ejecuten eficientemente sin quedarse sin " "espacio. Se puede hacer una estimación aproximada al multiplicar el número " "de `max_workers` - o N trabajadores ejecutando - por un tamaño promedio de " "imagen (por ejemplo 500MB). La estimación del tamaño de imagen se debe hacer " "con base en el tamaño promedio en su implementación. Tenga en cuenta que " "dependiendo de la tarea que se ejecute pude necesitar multiplicar este " "número por algún factor dependiendo de lo que hace la tarea. Por ejemplo, " "va querer duplicar el tamaño disponible si se permite la conversión de " "imagen. Ahora que se dijo esto, recuerde que estas son solo estimaciones y " "debe hacerlas con base en el peor de los casos y estar listo para actuar en " "caso de que estén mal." 
#, python-format msgid "Wrong command structure: %s" msgstr "Estructura de comando incorrecta: %s" msgid "You are not authenticated." msgstr "No está autenticado." msgid "You are not authorized to complete this action." msgstr "No está autorizado a completar esta acción." #, python-format msgid "You are not authorized to lookup image %s." msgstr "No tiene autorización para buscar la imagen %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "No tiene autorización para buscar los miembros de la imagen %s." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "No tiene permiso para crear etiqueta en el espacio de nombre propiedad de " "'%s'" msgid "You are not permitted to create image members for the image." msgstr "No tiene permiso para crear miembros de imagen para la imagen." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "No tiene permiso para crear imágenes propiedad de '%s'." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "No tiene permiso para crear espacio de nombre propiedad de '%s'" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "No tiene permiso para crear objeto propiedad de '%s'" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "No tiene permiso para crear propiedad perteneciente a'%s'" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "No tiene permiso para crear resource_type propiedad de '%s'" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "No tiene permiso para crear esta tarea como propiedad de: '%s" msgid "You are not permitted to deactivate this image." msgstr "No tiene permiso para deactivar esta imagen." msgid "You are not permitted to delete this image." msgstr "No tiene permiso para suprimir esta imagen." 
msgid "You are not permitted to delete this meta_resource_type." msgstr "No tiene permiso para eliminar este meta_resource_type." msgid "You are not permitted to delete this namespace." msgstr "No tiene permiso para eliminar este espacio de nombre." msgid "You are not permitted to delete this object." msgstr "No tiene permiso para eliminar este objeto." msgid "You are not permitted to delete this property." msgstr "No tiene permiso para eliminar esta propiedad." msgid "You are not permitted to delete this tag." msgstr "No tiene permiso para eliminar esta etiqueta." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "No tiene permiso para modificar '%(attr)s' en este %(resource)s." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "No tiene permiso para modificar '%s' en esta imagen." msgid "You are not permitted to modify locations for this image." msgstr "No tiene permiso para modificar ubicaciones para esta imagen." msgid "You are not permitted to modify tags on this image." msgstr "No tiene permiso para modificar etiquetas en esta imagen." msgid "You are not permitted to modify this image." msgstr "No tiene permiso para modificar esta imagen." msgid "You are not permitted to reactivate this image." msgstr "No tiene permiso para reactivar esta imagen." msgid "You are not permitted to set status on this task." msgstr "No tiene permiso para configurar estado en esta tarea." msgid "You are not permitted to update this namespace." msgstr "No tiene permiso para actualizar este espacio de nombre." msgid "You are not permitted to update this object." msgstr "No tiene permiso para actualizar este objeto." msgid "You are not permitted to update this property." msgstr "No tiene permiso para actualizar esta propiedad." msgid "You are not permitted to update this tag." msgstr "No tiene permiso para actualizar esta etiqueta." msgid "You are not permitted to upload data for this image." 
msgstr "No tiene permiso para cargar datos para esta imagen." #, python-format msgid "You cannot add image member for %s" msgstr "No se puede añadir el miembro de la imagen para %s" #, python-format msgid "You cannot delete image member for %s" msgstr "No se puede suprimir el miembro de la imagen para %s" #, python-format msgid "You cannot get image member for %s" msgstr "No se puede obtener el miembro de la imagen para %s" #, python-format msgid "You cannot update image member %s" msgstr "No se puede actualizar el miembro de la imagen %s" msgid "You do not own this image" msgstr "No es propietario de esta imagen " msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Ha seleccionado utilizar SSL en la conexión y ha proporcionado un " "certificado, pero no ha proporcionado un parámetro key_file ni ha definido " "la variable de entorno GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Ha seleccionado utilizar SSL en la conexión y ha proporcionado una clave, " "pero no ha proporcionado un parámetro cert_file ni ha definido la variable " "de entorno GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() obtuvo un argumento de búsqueda inesperado '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "No se puede pasar de %(current)s a %(next)s en la actualización (se desea " 
"from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "las propiedades personalizadas (%(props)s) están en conflicto con las " "propiedades base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Los concentradores de 'sondeo' y los de 'selección' no están disponibles en " "esta plataforma" msgid "is_public must be None, True, or False" msgstr "is_public debe ser None, True o False" msgid "limit param must be an integer" msgstr "el parámetro de límite debe ser un entero" msgid "limit param must be positive" msgstr "el parámetro de límite debe ser positivo" #, python-format msgid "location: %s data lost" msgstr "se han perdido los datos de ubicación: %s" msgid "md5 hash of image contents." msgstr "md5 hash de contenidos de imagen." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() obtuvo argumentos de búsqueda inesperados %s" msgid "protected must be True, or False" msgstr "protected debe ser True o False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "No se puede iniciar %(serv)s. Se ha obtenido error: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id es demasiado largo, el tamaño máximo es %s" glance-12.0.0/glance/locale/ja/0000775000567000056710000000000012701407204017243 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ja/LC_MESSAGES/0000775000567000056710000000000012701407204021030 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ja/LC_MESSAGES/glance.po0000664000567000056710000037127712701407051022642 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Tomoyuki KATO , 2013 # Akihiro Motoki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. 
#zanata # Yuko Katabami , 2015. #zanata # Tsutomu Kimura , 2016. #zanata # Yuta Hono , 2016. #zanata # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev7\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-29 18:17+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-30 04:55+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s は文字列でなければなりません" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s は必須です" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s を %(length)i より長くすることはできません" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s を %(length)i より短くすることはできません" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s はパターン %(pattern)s に一致していなければなりません" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "最後の RPC 呼び出しで %(cls)s 例外が発生しました: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "イメージ %(i_id)s のメンバーリストで %(m_id)s が見つかりません。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) が実行中..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s は既に実行されている可能性があります: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s はモジュールとして 2 回登録されています。%(module)s は使用されて" "いません。" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "" "%(task_type)s の %(task_id)s が正しく設定されていません。ファイルシステムスト" "アをロードできませんでした。" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s の %(task_id)s が適切に設定されていません。作業ディレクトリー " "%(work_dir)s がありません" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(serv)s の %(verb)s 中" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(conf)s を使用して %(serv)s を %(verb)s 中" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s host:port のペアを指定してください。host は IPv4 アドレス、IPv6 アドレス、" "ホスト名、または FQDN です。IPv6 アドレスを使用する場合は、アドレスを大括弧で" "囲んでポートと区別してください (例えば、\"[fe80::a:b:c]:9876\")。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s に 4 バイトの Unicode 文字が含まれていてはなりません。" #, python-format msgid "%s is already stopped" msgstr "%s は既に停止しています" #, python-format msgid "%s is stopped" msgstr "%s は停止しています" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "'%(param)s の値が範囲外です。%(max)d を超えてはなりません" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "keystone 認証戦略が有効な場合は、--os_auth_url オプションまたはOS_AUTH_URL 環" "境変数が必要です\n" msgid "A body is not expected with this request." msgstr "この要求では本文は予期されません。" msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "形式名または名前-バージョンで許可されていない成果物のリスト。空のリストは、ど" "の成果物もロード可能であることを意味します。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." 
msgstr "" "name=%(object_name)s のメタデータ定義オブジェクトは、namespace=" "%(namespace_name)s に既に存在します。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s のメタデータ定義プロパティーは、namespace=" "%(namespace_name)s に既に存在します。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "name=%(resource_type_name)s のメタデータ定義リソースタイプは、既に存在しま" "す。" #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "name=%(name)s のメタデータタグは既に namespace=%(namespace_name)s に存在しま" "す。" msgid "A set of URLs to access the image file kept in external store" msgstr "" "外部ストアに保持されているイメージファイルにアクセスするための一連の URL" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "ストア「ロケーション」メタデータを暗号化するための AES 鍵。この鍵を使用する場" "合は、Swift または S3 の資格情報が組み込まれます。長さが 16 バイト、24 バイ" "ト、または 32 バイトのランダム文字列に設定する必要があります。" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "サーバーをバインドするアドレス。特定のネットワークインターフェースを選択する" "ときに役立ちます。" msgid "Address to find the registry server." msgstr "レジストリーサーバーを検索するアドレス。" msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "非認証ユーザーが、読み取り専用特権で API にアクセスできるようにします。これ" "は、ContextMiddleware の使用時にのみ適用されます。" #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "許可値 %s は指定されたバリデーター下では無効です" msgid "Amount of disk space (in GB) required to boot image." msgstr "イメージのブートに必要なディスクスペースの量 (GB)" msgid "Amount of ram (in MB) required to boot image." 
msgstr "イメージのブートに必要な RAM の量 (MB)" msgid "An identifier for the image" msgstr "イメージの ID" msgid "An identifier for the image member (tenantId)" msgstr "イメージメンバーの ID (テナント ID)" msgid "An identifier for the owner of this task" msgstr "このタスクの所有者 ID" msgid "An identifier for the task" msgstr "タスクの ID" msgid "An image file url" msgstr "イメージファイルの URL" msgid "An image schema url" msgstr "イメージスキーマの URL" msgid "An image self url" msgstr "イメージ自体の URL" #, python-format msgid "An image with identifier %s already exists" msgstr "ID %s のイメージは既に存在します" msgid "An import task exception occurred" msgstr "インポートタスクの例外が発生しました" msgid "An object with the same identifier already exists." msgstr "同じ ID のオブジェクトが既に存在します。" msgid "An object with the same identifier is currently being operated on." msgstr "現在、同じ ID を持つオブジェクトが操作されています。" msgid "An object with the specified identifier was not found." msgstr "指定された ID を持つオブジェクトが見つかりませんでした。" msgid "An unknown exception occurred" msgstr "不明な例外が発生しました" msgid "An unknown task exception occurred" msgstr "不明なタスク例外が発生しました" #, python-format msgid "Array has no element at position %d" msgstr "アレイの位置 %d に要素がありません" msgid "Array property can't have item_type=Array" msgstr "配列型のプロパティーには item_type=Array を設定できません" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "成果物 %s は使用中のため削除できませんでした: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "成果物は状態を %(source)s から %(target)s に変更できません" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "成果物がストレージクォータを超えています: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "成果物にプロパティー %(prop)s がありません" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "成果物状態を %(curr)s から %(to)s に変更できません" #, python-format msgid "Artifact storage media is full: %s" msgstr "成果物ストレージのメディアがフルです: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not 
known" msgstr "" "名前が「%(name)s」でバージョンが「%(version)s」の成果物タイプは認識されていま" "せん" msgid "Artifact with a circular dependency can not be created" msgstr "循環依存関係を持つ成果物は作成できません" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "id=%(id)s の成果物はアクセス不可です" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "id=%(id)s の成果物が見つかりませんでした" msgid "Artifact with the specified type, name and version already exists" msgstr "指定されたタイプ、名前、およびバージョンの成果物は既に存在します" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "指定されたタイプ、名前、およびバージョンの成果物には既に直接依存関係=%(dep)s " "があります" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "指定されたタイプ、名前、およびバージョンの成果物には既に推移依存関係=%(dep)s " "があります" msgid "Attempt to set readonly property" msgstr "読み取りプロパティーを設定しようとしました" msgid "Attempt to set value of immutable property" msgstr "不変プロパティーの値を設定しようとしました" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "重複したイメージのアップロードを試行します: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "待機状況になっていないイメージの「ロケーション」フィールドを更新しようとしま" "した。" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "属性 '%(property)s' は読み取り専用です。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "属性 '%(property)s' は予約されています。" #, python-format msgid "Attribute '%s' is read-only." msgstr "属性 '%s' は読み取り専用です。" #, python-format msgid "Attribute '%s' is reserved." msgstr "属性 '%s' は予約されています。" msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "キューに入れられたイメージについてのみ属性 container_format を置換できます。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "キューに入れられたイメージについてのみ属性 disk_format を置換できます。" msgid "" "Auth key for the user authenticating against the Swift authentication " "service. 
(deprecated)" msgstr "Swift 認証サービスに対して認証するユーザーの認証キー。(非推奨)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "URL %(url)s の認証サービスが見つかりません。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "認証エラー - トークンがファイルアップロード中に失効した可能性があります。 %s " "へのイメージデータを削除します。" msgid "Authorization failed." msgstr "許可が失敗しました。" msgid "Available categories:" msgstr "使用可能カテゴリー:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "正しくない \"%s\" 照会フィルター形式。ISO 8601 DateTime 表記を使用してくださ" "い。" #, python-format msgid "Bad Command: %s" msgstr "正しくないコマンド: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "ヘッダーが正しくありません: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "正しくない値がフィルター %(filter)s に渡され、%(val)s が取得されました" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "S3 URI の形式が正しくありません: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Swift URI 内の資格情報 '%(creds)s' の形式が正しくありません" msgid "Badly formed credentials in Swift URI." msgstr "Swift URI 内の資格情報の形式が正しくありません。" msgid "Base directory that the image cache uses." msgstr "イメージキャッシュで使用される基本ディレクトリー。" msgid "BinaryObject property cannot be declared mutable" msgstr "BinaryObject プロパティーは変更可能として宣言できません" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Blob %(name)s には複数の値を設定できません" msgid "Blob size is not set" msgstr "Blob サイズが設定されていません" msgid "Body expected in request." msgstr "要求の本体が必要です。" msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "file および legacy_image_id の両方を同時に指定することはできません" msgid "CA certificate file to use to verify connecting clients." msgstr "接続クライアントを検査するために使用される CA 証明書ファイル。" msgid "Cannot be a negative value" msgstr "負の値にすることはできません" msgid "Cannot be a negative value." 
msgstr "負の値にすることはできません。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "イメージ %(key)s '%(value)s' を整数に変換できません。" msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "予約名「metadata」で成果物プロパティーを宣言することはできません" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "成果物 %(name)s をロードできません" msgid "Cannot remove last location in the image." msgstr "イメージ内の最後のロケーションは削除できません。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "イメージ %(image_id)s のデータを保存できません: %(error)s" msgid "Cannot set locations to empty list." msgstr "空のリストにロケーションを設定することはできません。" msgid "Cannot specify 'max_size' explicitly" msgstr "「max_size」を明示的に指定することはできません" msgid "Cannot specify 'min_size' explicitly" msgstr "「min_size」を明示的に指定することはできません" msgid "Cannot upload to an unqueued image" msgstr "キューに入れられていないイメージに対してアップロードできません" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "このパラメーターは演算子 %(op)s と併用できません" msgid "Certificate file to use when starting API server securely." msgstr "API サーバーを安全に始動するときに使用される証明書ファイル。" #, python-format msgid "Certificate format not supported: %s" msgstr "証明書の形式がサポートされません: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "証明書は以下の時間以降には無効です: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "証明書は以下の時間以前には無効です: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "チェックサムの検証に失敗しました。イメージ '%s' のキャッシュを打ち切りまし" "た。" msgid "Client disconnected before sending all data to backend" msgstr "すべてのデータをバックエンドへ送信する前にクライアントが切断されました" msgid "Command not found" msgstr "コマンドが見つかりません" msgid "Configuration option was not valid" msgstr "構成オプションが無効でした" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." 
msgstr "接続エラー/URL %(url)s の認証サービスに対する正しくない要求。" #, python-format msgid "Constructed URL: %s" msgstr "URL を構成しました: %s" msgid "Container format is not specified." msgstr "コンテナーフォーマットが指定されていません。" msgid "Content-Type must be application/octet-stream" msgstr "Content-Type は application/octet-stream でなければなりません" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "イメージ %(image_id)s のイメージダウンロードが壊れています" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30 秒間の試行後に %(host)s:%(port)s にバインドできませんでした" msgid "Could not find OVF file in OVA archive file." msgstr "OVA アーカイブファイル内に OVF ファイルが見つかりませんでした。" #, python-format msgid "Could not find metadata object %s" msgstr "メタデータオブジェクト %s が見つかりませんでした" #, python-format msgid "Could not find metadata tag %s" msgstr "メタデータタグ %s が見つかりませんでした" #, python-format msgid "Could not find namespace %s" msgstr "名前空間 %s が見つかりませんでした" #, python-format msgid "Could not find property %s" msgstr "プロパティー %s が見つかりませんでした" msgid "Could not find required configuration option" msgstr "必要な設定オプションが見つかりませんでした" #, python-format msgid "Could not find task %s" msgstr "タスク %s が見つかりませんでした" #, python-format msgid "Could not update image: %s" msgstr "イメージを更新できませんでした: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "現在、複数のディスクを含む OVA パッケージはサポートされません。" msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "カスタムバリデーターリストにはタプル '(function, message)' が含まれていなけれ" "ばなりません" #, python-format msgid "Data for image_id not found: %s" msgstr "image_id のデータが見つかりません: %s" msgid "Data supplied was not valid." 
msgstr "指定されたデータが無効でした。" msgid "Date and time of image member creation" msgstr "イメージメンバーの作成日時" msgid "Date and time of image registration" msgstr "イメージ登録日時" msgid "Date and time of last modification of image member" msgstr "イメージメンバーの最終変更日時" msgid "Date and time of namespace creation" msgstr "名前空間の作成日時" msgid "Date and time of object creation" msgstr "オブジェクトの作成日時" msgid "Date and time of resource type association" msgstr "リソースタイプ関連付けの日時" msgid "Date and time of tag creation" msgstr "タグの作成日時" msgid "Date and time of the last image modification" msgstr "イメージの最終変更日時" msgid "Date and time of the last namespace modification" msgstr "名前空間の最終変更日時" msgid "Date and time of the last object modification" msgstr "オブジェクトの最終変更日時" msgid "Date and time of the last resource type association modification" msgstr "リソースタイプ関連付けの最終変更日時" msgid "Date and time of the last tag modification" msgstr "タグの最終変更日時" msgid "Datetime when this resource was created" msgstr "このリソースが作成された日時" msgid "Datetime when this resource was updated" msgstr "このリソースが更新された日時" msgid "Datetime when this resource would be subject to removal" msgstr "このリソースが削除される日時" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "要求内で明示的に指定されていない場合に、要求によって返される項目数のデフォル" "ト値" msgid "Default value is invalid" msgstr "デフォルト値が無効です" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "" "成果物のアップロードを試みましたが、クォータを超えるため拒否されます: %s " #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "イメージをアップロードしようとしましたが、割り当て量を超えてしまうため、拒否" "されています: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." 
msgstr "%d バイトより大きいイメージのアップロード試行を拒否しています。" #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "依存関係のプロパティー '%s' は最初に宣言される必要があります" msgid "Dependency relations cannot be mutable" msgstr "依存関係は変更可能にはできません" msgid "Deploy the v1 OpenStack Images API." msgstr "v1 OpenStack イメージ API をデプロイしてください。" msgid "Deploy the v1 OpenStack Registry API." msgstr "v1 OpenStack レジストリー API をデプロイします。" msgid "Deploy the v2 OpenStack Images API." msgstr "v2 OpenStack イメージ API をデプロイしてください。" msgid "Deploy the v2 OpenStack Registry API." msgstr "v2 OpenStack レジストリー API をデプロイします。" msgid "Descriptive name for the image" msgstr "イメージの記述名" msgid "Dictionary contains unexpected key(s)" msgstr "ディクショナリーに予期しないキーが含まれています" msgid "Dictionary size is greater than maximum" msgstr "ディクショナリーサイズが最大を上回っています" msgid "Dictionary size is less than minimum" msgstr "ディクショナリーサイズが最小を下回っています" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "デジタル署名に使用されるダイジェストアルゴリズム。コマンド \"openssl list-" "message-digest-algorithms\" を使用して、プラットフォーム上の OpenSSL のバー" "ジョンでサポートされる使用可能なアルゴリズムを取得します。例として " "\"sha1\"、\"sha256\"、\"sha512\" などがあります。" msgid "Disk format is not specified." msgstr "ディスクフォーマットが指定されていません。" msgid "Does not match pattern" msgstr "パターンに一致しません" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "ドライバー %(driver_name)s を正しく設定できませんでした。理由: %(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "file または legacy_image_id のいずれかを指定する必要があります" msgid "" "Error decoding your request. 
Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "要求のデコードのエラー。URL または要求本文に Glance でデコードできない文字が" "含まれていました。" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "イメージ %(image_id)s のメンバーの取得中のエラー: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "ストア設定にエラーがあります。ストアへの成果物の追加が無効になっています。" msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "ストア設定にエラーがあります。ストアへのイメージの追加が無効になっています。" msgid "Error occurred while creating the verifier" msgstr "ベリファイヤーの作成中にエラーが発生しました" msgid "Error occurred while verifying the signature" msgstr "シグニチャーの検証中にエラーが発生しました" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "次の形式でメンバーを予期: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "次の形式で状態を予期: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "外部ソースは空であってはなりません" #, python-format msgid "External sources are not supported: '%s'" msgstr "外部ソースはサポートされていません: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "イメージのアクティブ化に失敗しました。受け取ったエラー: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "イメージメタデータを追加できませんでした。受け取ったエラー: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "削除する成果物 %(artifact_id)s が見つかりませんでした" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "削除するイメージ %(image_id)s が見つかりませんでした" #, python-format msgid "Failed to find image to delete: %s" msgstr "削除するイメージが見つかりませんでした: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "更新するイメージが見つかりませんでした: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "削除するリソースタイプ %(resourcetype)s が見つかりませんでした" #, python-format msgid "Failed to initialize the image cache database. 
Got error: %s" msgstr "" "イメージキャッシュデータベースを初期化できませんでした。受け取ったエラー: %s" #, python-format msgid "Failed to read %s from config" msgstr "設定から %s を読み取ることができませんでした" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "イメージを予約できませんでした。受け取ったエラー: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "イメージメタデータを更新できませんでした。エラー: %s" #, python-format msgid "Failed to upload image %s" msgstr "イメージ %s をアップロードできませんでした" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP エラーが発生したため、イメージ %(image_id)s のイメージデータのアップロー" "ドに失敗しました: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "内部エラーが発生したため、イメージ %(image_id)s のイメージデータをアップロー" "ドできませんでした: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "ファイル %(path)s に無効なバッキングファイル %(bfile)s があります。打ち切りま" "す。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "ファイルベースのインポートは許可されません。イメージデータの非ローカルソース" "を使用してください。" msgid "File too large" msgstr "ファイルが大きすぎます" msgid "File too small" msgstr "ファイルが小さすぎます" msgid "Forbidden image access" msgstr "イメージにアクセスする権限がありません" #, python-format msgid "Forbidden to delete a %s image." msgstr "%s イメージの削除は禁止されています。" #, python-format msgid "Forbidden to delete image: %s" msgstr "イメージの削除は禁止されています: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "%(status)s イメージの '%(key)s' を変更することは禁止されています。" #, python-format msgid "Forbidden to modify '%s' of image." msgstr "イメージの '%s' を変更することは禁止されています。" msgid "Forbidden to reserve image." msgstr "イメージの予約は禁止されています。" msgid "Forbidden to update deleted image." 
msgstr "削除されたイメージの更新は禁止されています。" #, python-format msgid "Forbidden to update image: %s" msgstr "イメージの更新は禁止されています: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "禁止されているアップロードの試行: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "要求は禁止されています。メタデータ定義 namespace=%s を表示できません" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "要求を禁止しています。タスク %s は表示されません" msgid "Format of the container" msgstr "コンテナーの形式" msgid "Format of the disk" msgstr "ディスクの形式" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "blob %(name)s のデータを取得できませんでした: %(err)s。" #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "イメージ %(id)s のデータを取得できませんでした: %(err)s。" msgid "Glance client not installed" msgstr "Glance クライアントがインストールされていません" #, python-format msgid "Host \"%s\" is not valid." msgstr "ホスト \"%s\" が無効です。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "ホストおよびポート \"%s\" が無効です。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "適切な場合 (通常は障害発生時) にのみ、人間が読み取れる情報メッセージが含まれ" "ます" msgid "If False doesn't trace SQL requests." msgstr "False が SQL 要求をトレースしない場合。" msgid "If False fully disable profiling feature." msgstr "False がプロファイル作成機能を完全に無効にする場合。" msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "False の場合、サーバーはヘッダー「Connection: close」を返します。True の場" "合、サーバーは応答として「Connection: Keep-Alive」を返します。応答が送信さ" "れ、クライアントによって正常に読み取られた後でクライアントソケット接続を明示" "的に閉じるには、wsgi サーバーの作成時にこのオプションを False に設定します。" msgid "If true, image will not be deletable." 
msgstr "true の場合、イメージは削除可能になりません。" msgid "If true, namespace will not be deletable." msgstr "true の場合、名前空間は削除可能になりません。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "イメージ %(id)s は使用中のため削除できませんでした: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "イメージ %(id)s が見つかりません" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "アップロード後にイメージ %(image_id)s が見つかりませんでした。このイメージは" "アップロード中に削除された可能性があります: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "イメージ %(image_id)s は保護されているため、削除できません。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "アップロード後にイメージ %s が見つかりませんでした。イメージはアップロード中" "に削除された可能性があります。アップロードされたチャンクをクリーンアップ中で" "す。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "" "アップロード後にイメージ %s が見つかりませんでした。このイメージはアップロー" "ド中に削除された可能性があります。" #, python-format msgid "Image %s is deactivated" msgstr "イメージ %s は非アクティブ化されています" #, python-format msgid "Image %s is not active" msgstr "イメージ %s はアクティブではありません" #, python-format msgid "Image %s not found." msgstr "イメージ %s が見つかりません。" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "イメージがストレージクォータを超えています: %s" msgid "Image id is required." 
msgstr "イメージ ID が必要です。" msgid "Image is protected" msgstr "イメージは保護されています" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "イメージ %(id)s のメンバー数がイメージメンバー上限を超えました: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "イメージ名が長すぎます: %d" msgid "Image operation conflicts" msgstr "イメージ操作が競合しています" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s から %(new_status)s へのイメージのステータス移行は許可されませ" "ん" #, python-format msgid "Image storage media is full: %s" msgstr "イメージストレージのメディアがフルです: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "イメージ %(id)s のイメージタグ上限を超えました: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "イメージのアップロード問題: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "ID %s のイメージは既に存在します。" #, python-format msgid "Image with identifier %s has been deleted." msgstr "ID %s のイメージが削除されました。" #, python-format msgid "Image with identifier %s not found" msgstr "ID %s のイメージが見つかりません" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "指定された ID %(image_id)s を持つイメージが見つかりませんでした" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "認証ストラテジーが誤っています。\"%(expected)s\" が必要ですが、\"%(received)s" "\" を受け取りました" #, python-format msgid "Incorrect request: %s" msgstr "正しくない要求: %s" msgid "Index is out of range" msgstr "インデックスは範囲外です" msgid "Index is required" msgstr "インデックスが必要です" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "入力に '%(key)s' フィールドが含まれていません" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "成果物ストレージのメディアに対する許可が不十分です: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "イメージストレージのメディアに対する許可が不十分です: %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "%s の作業に対して無効な 
Content-Type" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "このリソースの JSON ポインターは無効です: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "無効な証明書の形式: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "無効なチェックサム '%s': 32文字を超えることはできません" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 設定ファイルの設定が無効です。" msgid "Invalid configuration in property protection file." msgstr "プロパティー保護ファイルで設定が無効です。" #, python-format msgid "Invalid container format '%s' for image." msgstr "コンテナー形式 '%s' はイメージには無効です。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "コンテンツタイプ %(content_type)s が無効です" msgid "Invalid dict property type" msgstr "ディクショナリープロパティータイプが無効です" msgid "Invalid dict property type specification" msgstr "ディクショナリープロパティータイプ指定が無効です" #, python-format msgid "Invalid disk format '%s' for image." msgstr "ディスク形式 '%s' はイメージには無効です。" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "無効なフィルター値 %s。引用符が組みになっていません。" #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "無効なフィルター値 %s。終了引用符の後にコンマがありません。" #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "無効なフィルター値 %s。開始引用符の前にコンマがありません。" #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "無効なヘッダー \"Content-Type\": %s" msgid "Invalid image id format" msgstr "イメージ ID の形式が無効です" msgid "Invalid item type specification" msgstr "項目タイプ指定が無効です" #, python-format msgid "Invalid json body: %s" msgstr "無効な JSON 本文: %s" msgid "Invalid jsonpatch request" msgstr "jsonpatch 要求が無効です" #, fuzzy msgid "Invalid location" msgstr "無効なロケーション" #, python-format msgid "Invalid location %s" msgstr "無効なロケーション %s" #, python-format msgid "Invalid location: %s" msgstr "無効なロケーション: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. 
The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "location_strategy オプションが無効です: %(name)s。有効なストラテジーオプショ" "ン: %(strategies)s" #, fuzzy msgid "Invalid locations" msgstr "無効なロケーション" #, python-format msgid "Invalid locations: %s" msgstr "無効なロケーション: %s" msgid "Invalid marker format" msgstr "マーカーフォーマットが無効です" msgid "Invalid marker. Image could not be found." msgstr "無効なマーカーです。イメージが見つかりませんでした。" #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "無効な mask_gen_algorithm: %s" #, python-format msgid "Invalid membership association: %s" msgstr "無効なメンバーシップの関連付け: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "ディスクとコンテナーの形式が無効な形で混在しています。ディスクまたはコンテ" "ナーの形式を 'aki'、'ari'、または 'ami' のいずれかに設定するときは、コンテ" "ナーとディスクの形式が一致していなければなりません。" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "無効な操作: `%(op)s`。以下のいずれかでなければなりません: %(available)s。" msgid "Invalid position for adding a location." msgstr "ロケーションの追加位置が無効です。" msgid "Invalid position for removing a location." msgstr "ロケーション削除位置が無効です。" msgid "Invalid property definition" msgstr "プロパティー定義が無効です" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "無効な pss_salt_length: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "シグニチャーの鍵タイプの無効な公開鍵: %s" msgid "Invalid reference list specification" msgstr "参照リスト指定が無効です" msgid "Invalid referenced type" msgstr "参照されたタイプが無効です" msgid "Invalid request PATCH for work with blob" msgstr "blob の作業に対して無効な要求 PATCH" msgid "Invalid service catalog json." 
msgstr "無効なサービスカタログ JSON ファイル。" #, python-format msgid "Invalid signature hash method: %s" msgstr "無効なシグニチャーハッシュメソッド: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "無効なシグニチャーの鍵タイプ: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "無効なソート方向: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "無効なソートキー: %(sort_key)s。タイプのバージョンが設定されていない場合、以" "下のいずれでなければなりません: %(available)s。" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "ソートキー %(sort_key)s は無効です。 %(available)s のいずれかでなければなりま" "せん。" #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "無効なソートキー: %(sort_key)s。このプロパティーによるソートはできません" #, fuzzy, python-format msgid "Invalid status value: %s" msgstr "状況値が無効です: %s" #, python-format msgid "Invalid status: %s" msgstr "無効な状況: %s" #, python-format msgid "Invalid time format for %s." msgstr "%s に対する無効な時刻フォーマット。" msgid "Invalid type definition" msgstr "タイプ定義が無効です" #, python-format msgid "Invalid type value: %s" msgstr "タイプ値が無効です: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "無効な更新です。結果として、同じ名前 %s でメタデータ定義名前空間が重複しま" "す。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義オブジェクトが重複します。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義オブジェクトが重複します。" #, python-format msgid "" "Invalid update. 
It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無効な更新です。結果として、同じ name=%(name)s で、namespace=" "%(namespace_name)s でメタデータ定義プロパティーが重複します。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "パラメーター '%(param)s' の値 '%(value)s' が無効です: %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "オプション %(option)s の値が無効です: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "無効な可視性の値: %s" msgid "Is not allowed value" msgstr "許可値ではありません" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "%s='yes' を設定する前に eventlet モジュールがインポートされた可能性がありま" "す。eventlet.greendns は現在 IPv6 アドレスに準拠していないため、ipv6 を使用し" "ている場合は eventlet.greendns を無効にする必要があります。この設定を行う前" "に eventlet をインポートしないようにしてください。" msgid "It's invalid to provide multiple image sources." msgstr "イメージソースの複数指定は無効です。" msgid "It's not allowed to add locations if locations are invisible." msgstr "ロケーションが表示されない場合、ロケーションを追加できません。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "ロケーションが表示されない場合、ロケーションを削除できません。" msgid "It's not allowed to update locations if locations are invisible." msgstr "ロケーションが表示されない場合、ロケーションを更新できません。" msgid "Items have to be unique" msgstr "項目は固有でなければなりません" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "" "Json パスの先頭は「/」でなければならず、末尾は「/」であってはなりません。後続" "の 2 つの「/」は使用できません。" msgid "Legacy image was not found" msgstr "レガシーイメージが見つかりませんでした" msgid "Length is greater than maximum" msgstr "長さが最大値を超えています" msgid "Length is less than minimum" msgstr "長さが最小値を下回っています" msgid "Limit param must be an integer" msgstr "Limit パラメーターは整数でなければなりません" msgid "Limit param must be positive" msgstr "Limit パラメーターは正の値でなければなりません" #, python-format msgid "Limit param must not be higher than %d" msgstr "Limit パラメーターは %d を超えてはなりません" msgid "Limits request ID length." msgstr "要求 ID の長さを制限します。" msgid "List definitions may hot have defaults" msgstr "リスト定義にはデフォルトを設定できません" msgid "List of strings related to the image" msgstr "イメージに関連する文字列のリスト" msgid "List size is greater than maximum" msgstr "リストサイズが最大を上回っています" msgid "List size is less than minimum" msgstr "リストサイズが最小を下回っています" msgid "Loop time between checking for new items to schedule for delete." msgstr "新しい項目の削除スケジュールを検査するときのループ時間。" #, python-format msgid "Malformed Content-Range header: %s" msgstr "コンテンツ範囲ヘッダーの形式が正しくありません: %s" msgid "Malformed JSON in request body." msgstr "要求本体の JSON の形式が誤りです。" msgid "Max string length may not exceed 255 characters" msgstr "最大文字列長を 255 文字より大きくすることはできません" msgid "Maximal age is count of days since epoch." msgstr "最長存続時間は、エポック以降の日数です。" msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "受け入れられるメッセージヘッダーの最大行サイズ。大きなトークン (通常は、" "Keystone v3 API で大きなサービスカタログを使用して生成されるトークン) を使用" "するときは max_header_line を増やさなければならない場合があります。" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "イメージあたりの最大イメージメンバー数。負の値は無制限値に評価されます。 " msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." 
msgstr "" "1 つのイメージに許可される最大ロケーション数。負の値は無制限値に評価されま" "す。" msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "1 つのイメージに許可される最大プロパティー数。負の値は無制限値に評価されま" "す。" msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "1 つのイメージに許可される最大タグ数。負の値は無制限値に評価されます。 " msgid "Maximum permissible number of items that could be returned by a request" msgstr "要求によって返されることが可能な最大許容項目数" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "最大リダイレクト数 (%(redirects)s) を超えました。" msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "ユーザーがアップロードできるイメージの最大サイズ (バイト)。デフォルトは" "1099511627776 バイト (1 TB) です。警告: この値を増加する場合は慎重な検討が必" "要であり、8 EB (9223372036854775808) より小さい値に設定する必要があります。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "イメージ %(image_id)s のメンバー %(member_id)s が重複しています" msgid "Member can't be empty" msgstr "「メンバー」は空にできません" msgid "Member to be added not specified" msgstr "追加するメンバーが指定されていません" msgid "Membership could not be found." msgstr "メンバーシップが見つかりませんでした。" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "メタデータ定義名前空間 %(namespace)s は保護されており、削除できません。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s のメタデータ定義名前空間が見つかりません" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "メタデータ定義 namespace=%(namespace_name)s が見つかりませんでした。" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." 
msgstr "" "メタデータ定義オブジェクト %(object_name)s は保護されており、削除できません。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s のメタデータ定義オブジェクトが見つかりません" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "メタデータ定義プロパティー %(property_name)s は保護されており、削除できませ" "ん。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s のメタデータ定義プロパティーが見つかりません" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "メタデータ定義リソースタイプ %(resource_type_name)s はシードシステムタイプで" "あり、削除できません。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "メタデータ定義リソースタイプ関連付け %(resource_type)s は保護されており、削除" "できません。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "メタデータ定義タグ %(tag_name)s は保護されており、削除できません。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s のメタデータ定義タグが見つかりません" msgid "Min string length may not be negative" msgstr "最小文字列長は負にはできません" msgid "Minimal rows limit is 1." msgstr "最少行数制限は 1 です。" #, python-format msgid "Missing required credential: %(required)s" msgstr "必須の資格情報がありません: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "領域 %(region)s に対して複数の「イメージ」サービスが一致します。これは一般" "に、領域が必要であるのに、領域を指定していないことを意味します。" msgid "Must supply a positive, non-zero value for age." msgstr "存続期間にはゼロ以外の正の値を指定してください。" msgid "Name of the paste configuration file." 
msgstr "paste 設定ファイルの名前。" #, python-format msgid "No artifact found with ID %s" msgstr "ID が %s の成果物が見つかりません" msgid "No authenticated user" msgstr "認証されていないユーザー" #, python-format msgid "No image found with ID %s" msgstr "ID が %s であるイメージは見つかりません" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "イメージ %(img)s 内で ID が %(loc)s のロケーションは見つかりません" msgid "No permission to share that image" msgstr "そのイメージを共有する許可がありません" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "「%(name)s」のプラグインがロードされませんでした" msgid "No property to access" msgstr "アクセスするプロパティーがありません" #, python-format msgid "No such key '%s' in a dict" msgstr "辞書にはそのような鍵 '%s' はありません" #, python-format msgid "Not a blob property '%s'" msgstr "blob プロパティー '%s' ではありません" msgid "Not a downloadable entity" msgstr "ダウンロード可能なエンティティーではありません" msgid "Not a list property" msgstr "リストプロパティーではありません" #, python-format msgid "Not a list property '%s'" msgstr "リストプロパティー '%s' ではありません" msgid "Not a valid value type" msgstr "有効な値タイプではありません" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "すべての依存関係が '%s' 状態にあるわけではありません" #, python-format msgid "Not allowed to create members for image %s." msgstr "イメージ %s のメンバーの作成は許可されていません。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "状況が「%s」であるイメージの非アクティブ化は許可されていません" #, python-format msgid "Not allowed to delete members for image %s." msgstr "イメージ %s のメンバーの削除は許可されていません。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "イメージ %s のタグの削除は許可されていません。" #, python-format msgid "Not allowed to list members for image %s." msgstr "イメージ %s のメンバーのリストは許可されていません。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "状況が「%s」であるイメージの再アクティブ化は許可されていません" #, python-format msgid "Not allowed to update members for image %s." msgstr "イメージ %s のメンバーの更新は許可されていません。" #, python-format msgid "Not allowed to update tags for image %s." 
msgstr "イメージ %s のタグの更新は許可されていません。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "イメージ %(image_id)s ではイメージデータのアップロードは許可されません: " "%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "アレイインデックス '%s' ではありません" msgid "Number of sort dirs does not match the number of sort keys" msgstr "ソート方向の数がソートキーの数に一致しません" msgid "OVA extract is limited to admin" msgstr "OVA 抽出が実行できるのは管理者のみです" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "目的の OVF メタデータは ovf-metadata.json 設定ファイルに指定されませんでし" "た。\"cim_pasd\" を目的の CIM_ProcessorAllocationSettingData プロパティーのリ" "ストに指定してください。" msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "OVF プロパティーの設定ファイル \"ovf-metadata.json\" が見つかりませんでした。" msgid "Old and new sorting syntax cannot be combined" msgstr "新旧のソート構文を結合することはできません" msgid "Only list indexes are allowed for blob lists" msgstr "blob リストで許可されるのはリストインデックスのみです" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "操作 \"%s\" には \"value\" という名前のメンバーが必要です。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" "ンバーを正確に 1 つだけ含める必要があります。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "操作オブジェクトには、\"add\"、\"remove\"、または \"replace\" という名前のメ" "ンバーを 1 つしか含められません。" msgid "Operations must be JSON objects." msgstr "操作は JSON オブジェクトでなければなりません。" #, python-format msgid "Operator %(op)s is not supported" msgstr "演算子 %(op)s はサポートされていません" #, fuzzy, python-format msgid "Original locations is not empty: %s" msgstr "元のロケーションは空ではありません: %s" msgid "Owner can't be updated by non admin." msgstr "管理者以外は所有者を更新できません。" msgid "Owner must be specified to create a tag." 
msgstr "タグを作成するには、所有者を指定する必要があります。" msgid "Owner of the image" msgstr "イメージの所有者" msgid "Owner of the namespace." msgstr "名前空間の所有者。" msgid "Param values can't contain 4 byte unicode." msgstr "Param 値に 4 バイトの Unicode が含まれていてはなりません。" msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "サービス名を除いた、paste 設定ファイル内のパイプラインの名前の一部。例えば、" "paste セクション名が [pipeline:glance-api-keystone] である場合は、値 " "\"keystone\" を使用します。" msgid "Path to the directory where json metadata files are stored" msgstr "JSON メタデータファイルが保管されるディレクトリーのパス" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "プラグイン名「%(plugin)s」は成果物タイプ名「%(name)s」に一致している必要があ" "ります" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "ポインター `%s` に、認識されているエスケープシーケンスの一部ではない \"~\" が" "含まれています。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "ポインター `%s` に隣接する \"/\" が含まれています。" #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "ポインター `%s` に有効なトークンが含まれていません。" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "ポインター `%s` の先頭が \"/\" ではありません。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "ポインター `%s` の末尾が \"/\" です。" msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "ポインターに、認識されているエスケープシーケンス [~0, ~1] の一部ではない「~」" "が含まれています。" #, python-format msgid "Port \"%s\" is not valid." msgstr "ポート \"%s\" が無効です。" msgid "Port the registry server is listening on." msgstr "レジストリーサーバーが listen しているポート。" #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "プレリリース数値コンポーネントが大きすぎます (最大文字数 %d)" msgid "Private key file to use when starting API server securely." 
msgstr "API サーバーを安全に始動するときに使用される秘密鍵ファイル。" #, python-format msgid "Process %d not running" msgstr "プロセス %d は実行されていません" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "データの保存前にプロパティー %s を設定する必要があります。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "プロパティー %(property_name)s の先頭が、想定されるリソースタイプ関連付けのプ" "レフィックス \"%(prefix)s\" ではありません。" #, python-format msgid "Property %s already present." msgstr "プロパティー %s は既に存在しています。" #, python-format msgid "Property %s does not exist." msgstr "プロパティー %s は存在しません。" #, python-format msgid "Property %s may not be removed." msgstr "プロパティー %s は削除できません。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "データの保存前にプロパティー %s を設定する必要があります。" #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "プロパティー「%(name)s」に値「%(val)s」がない可能性があります: %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "プロパティー '%s' は保護されています" msgid "Property names can't contain 4 byte unicode." msgstr "プロパティー名に 4 バイトの Unicode が含まれていてはなりません。" #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "ルール %(rule)s の操作 %(operation)s に対するプロパティー保護が見つかりませ" "ん。どの役割もこの操作の実行を許可されません。" #, python-format msgid "Property's %(prop)s value has not been found" msgstr "プロパティーの %(prop)s 値が見つかりませんでした" #, python-format msgid "" "Provided image size must match the stored image size. 
(provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "指定するイメージのサイズは、保管されているイメージのサイズと一致しなければな" "りません。(指定サイズ: %(ps)d、保管サイズ: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" "指定されたオブジェクトがスキーマ '%(schema)s' と一致しません: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "指定されたタスク状況はサポートされていません: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "指定されたタスクタイプはサポートされていません: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "分かりやすい名前空間の説明が提供されます。" msgid "Public images do not have members." msgstr "パブリックイメージにはメンバーがありません。" msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "バージョンエンドポイントに使用するパブリック URL。デフォルトは「なし」で、こ" "の場合は、要求の host_url 属性を使用して URL ベースにデータを追加します。" "Glance がプロキシーの背後で作動している場合は、この値をプロキシーの URL を表" "すように変更してください。" msgid "Python module path of data access API" msgstr "データアクセス API の Python モジュールパス" msgid "Received invalid HTTP redirect." msgstr "無効な HTTP リダイレクトを受け取りました。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "許可のために %(uri)s にリダイレクトしています。" #, python-format msgid "Registry service can't use %s" msgstr "レジストリーサービスでは %s を使用できません" #, python-format msgid "Registry was not configured correctly on API server. 
Reason: %(reason)s" msgstr "" "レジストリーが API サーバーで正しく設定されていませんでした。理由: %(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "関係 %(name)s には複数の値を設定できません" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s の再ロードはサポートされていません" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により再ロード中" #, python-format msgid "Removing stale pid file %s" msgstr "失効した pid ファイル %s を削除中" msgid "Request body must be a JSON array of operation objects." msgstr "要求本文は、操作オブジェクトの JSON 配列でなければなりません。" msgid "Request must be a list of commands" msgstr "要求はコマンドのリストである必要があります" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "シグニチャーの検証に必要なイメージプロパティーが存在しません。シグニチャーを" "検証できません。" #, python-format msgid "Required store %s is invalid" msgstr "必須のストア %s が無効です" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "可能であれば、リソースタイプ名を Heat リソースタイプと位置合わせします。" "http://docs.openstack.org/developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone からの応答に Glance エンドポイントが含まれていません。" msgid "Role used to identify an authenticated user as administrator." msgstr "認証ユーザーを管理者として識別するために使用される役割。" msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "長期実行プロセスとして実行してください。指定されない場合 (デフォルト)、修正操" "作を一度実行してから終了します。指定されている場合、終了せずに、設定に指定さ" "れている wakeup_time 間隔で修正を実行します。" msgid "Scope of image accessibility" msgstr "イメージのアクセス可能性の範囲" msgid "Scope of namespace accessibility." 
msgstr "名前空間アクセシビリティーの範囲。" msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "メッセージをトレースする Glance API サービスおよび Glance レジストリーサービ" "スの署名に使用する秘密鍵。" #, python-format msgid "Server %(serv)s is stopped" msgstr "サーバー %(serv)s は停止しています" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "サーバーワーカーの作成に失敗しました: %(reason)s" msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "システム全体の割り当て量をユーザーごとに設定します。この値は、ユーザーがすべ" "てのストレージシステムで使用できる総容量です。0 の値は無制限を意味します。値" "にオプションの単位を指定できます。使用できる単位は B、KB、MB、GB、および TB " "です (それぞれ、バイト、キロバイト、メガバイト、ギガバイト、およびテラバイト" "を表します)。単位を指定しない場合は、バイトが想定されます。値と単位の間にス" "ペースを入れてはならないこと、および単位には大/小文字の区別があることに注意し" "てください。" #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "表示レベル %(shl)s はこの操作ではサポートされていません" msgid "Signature verification failed" msgstr "シグニチャーの検証が失敗しました" msgid "Signature verification failed." msgstr "シグニチャーの検証が失敗しました。" msgid "Size of image file in bytes" msgstr "イメージファイルのサイズ (バイト)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." 
msgstr "" "一部のリソースタイプでは、インスタンスごとに複数のキー/値のペアが許可されてい" "ます。例えば、Cinder はボリューム上のユーザーおよびイメージメタデータを許可し" "ています。イメージプロパティーメタデータのみ、Nova (スケジュールまたはドライ" "バー) によって評価されます。このプロパティーによって、名前空間ターゲットから" "あいまいさを排除できます。" msgid "Sort direction supplied was not valid." msgstr "指定されたソート方向が無効でした。" msgid "Sort key supplied was not valid." msgstr "指定されたソートキーが無効でした。" msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "指定されたリソースタイプに使用するプレフィックスを指定します。名前空間にある" "プロパティーはすべて、指定されたリソースタイプに適用されるときに、このプレ" "フィックスが先頭に付けられます。コロン (:) などのプレフィックス区切り文字を組" "み込む必要があります。" msgid "Specifies which task executor to be used to run the task scripts." msgstr "タスクスクリプトの実行に使用されるタスク実行プログラムを指定します。" msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "状況は、\"保留中\"、\"受諾\"、または\"拒否\" でなければなりません。" msgid "Status not specified" msgstr "状況が指定されていません" msgid "Status of the image" msgstr "イメージの状態" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "%(cur_status)s から %(new_status)s への状況遷移は許可されません" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (pid %(pid)s) をシグナル (%(sig)s) により停止中" #, python-format msgid "Store for image_id not found: %s" msgstr "image_id のストアが見つかりません: %s" #, python-format msgid "Store for scheme %s not found" msgstr "スキーマ %s のストアが見つかりません" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." 
msgstr "" "指定された %(attr)s (%(supplied)s) とアップロードされたイメージ (%(actual)s) " "から生成された %(attr)s が一致していませんでした。イメージの状況を「強制終了" "済み」に設定します。" msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' イメージ属性に対してサポートされる値" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' イメージ属性に対してサポートされる値" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s として抑制された再作成は %(rsn)s でした。" msgid "System SIGHUP signal received." msgstr "システム SIGHUP シグナルを受信しました。" #, python-format msgid "Task '%s' is required" msgstr "タスク '%s' が必要です" msgid "Task does not exist" msgstr "タスクが存在しません" msgid "Task failed due to Internal Error" msgstr "内部エラーが原因でタスクが失敗しました" msgid "Task was not configured properly" msgstr "タスクが正しく設定されませんでした" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "指定された id %(task_id)s のタスクは見つかりませんでした" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "\"changes-since\" フィルターは v2 上で使用できなくなりました。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "指定した CA ファイル %s は存在しません" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "このタスク %(task_id)s で作成されているイメージ %(image_id)s オブジェクトは以" "降の処理に有効な状況ではなくなりました。" msgid "The Store URI was malformed." msgstr "ストア URI の形式に誤りがありました。" msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "keystone サービスの URL。\"use_user_token\" が無効で、keystone 認証を使用して" "いる場合、keystone の URL を指定できます。" msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Swift 認証サービスが listen しているアドレス。(非推奨)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." 
msgstr "" "管理者パスワード。\"use_user_token\" が無効であれば、管理資格情報を指定できま" "す。" msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "管理者ユーザー名。\"use_user_token\" が無効であれば、管理資格情報を指定できま" "す。" msgid "The amount of time in seconds to delay before performing a delete." msgstr "削除を実行するまでの遅延時間 (秒)。" msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "キャッシュクリーナー (実行されている場合) が不完全なイメージを削除する前に、" "そのイメージがキャッシュ内に残ることができる時間。" msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "TCP リスナーソケットの作成時に使用されるバックログ値。" #, python-format msgid "The cert file you specified %s does not exist" msgstr "指定した証明書ファイル %s は存在しません" msgid "The config file that has the swift account(s)configs." msgstr "swift アカウント設定が指定されている設定ファイル。" msgid "The current status of this task" msgstr "このタスクの現行状況" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "イメージキャッシュディレクトリー %(image_cache_dir)s が格納されているデバイス" "では xattr はサポートされません。fstab を編集して、キャッシュディレクトリーが" "格納されているデバイスの該当する行に user_xattr オプションを追加しなければな" "らない可能性があります。" msgid "The driver to use for image cache management." msgstr "イメージキャッシュ管理に使用されるドライバー。" #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "バージョン %s の形式が無効です。semver 表記を使用してください" msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "自動的に変換されるイメージの変換後の形式。RBD バックエンドを使用する場合、こ" "の形式は 'raw' に設定する必要があります" #, python-format msgid "" "The given uri is not valid. 
Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "指定した URI が無効です。次のサポートされている URI のリストから、有効な URI " "を指定してください: %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "デバッグ接続を listen する pydev プロセスのホスト名/IP" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "イメージ %s は既にスレーブ上にありますが、検査では見つかりませんでした。これ" "は、スレーブサーバー上のすべてのイメージを表示する許可を持っていないことを示" "します。" #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "入力成果物の blob が大きすぎます: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "入力イメージが大きすぎます: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "指定した鍵ファイル %s は存在しません" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージロケーション数の制限を超えました。試行: %(attempted)s、最" "大: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "このイメージに対して許可されるイメージメンバー数の制限を超えました。試行: " "%(attempted)s、最大: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージプロパティー数の制限を超えました。試行: %(attempted)s、最" "大: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "許可されるイメージプロパティー数の制限を超えました。試行: %(num)s、最大: " "%(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. 
Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "許可されるイメージタグ数の制限を超えました。試行: %(attempted)s、最大: " "%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "ロケーション %(location)s は既に存在します" #, python-format msgid "The location data has an invalid ID: %d" msgstr "ロケーションデータの ID が無効です: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "プロパティー保護ファイルのロケーション。このファイルには、プロパティー保護に" "関するルールと、それに関連付けられた役割/ポリシーが含まれています。この設定値" "が指定されていない場合、デフォルトでは、プロパティー保護は実施されません。値" "が指定されていてもファイルが見つからない場合、glance-api サービスは開始されま" "せん。" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "name=%(record_name)s のメタデータ定義 %(record_type)s は削除されていません。" "他のレコードがまだこのメタデータ定義を参照しています。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "メタデータ定義 namespace=%(namespace_name)s は既に存在します。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s のメタデータ定義オブジェクトが、namespace=" "%(namespace_name)s に見つかりませんでした。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s のメタデータ定義プロパティーは、namespace=" "%(namespace_name)s に見つかりませんでした。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." 
msgstr "" "resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" "タデータ定義リソースタイプ関連付けは、既に存在します。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "resource-type=%(resource_type_name)s の、namespace=%(namespace_name)s へのメ" "タデータ定義リソースタイプ関連付けが見つかりませんでした。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "name=%(resource_type_name)s のメタデータ定義リソースタイプが見つかりませんで" "した。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "name=%(name)s のメタデータ定義タグが namespace=%(namespace_name)s に見つかり" "ませんでした。" msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "エンジンが実行されるモード。「serial」または「parallel」を指定できます。" msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "要求を処理するために作成される子プロセスワーカーの数。デフォルトは、使用可能" "な CPU の数と等しくなります。" msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "エンジンで同時に実行されるパラレルアクティビティーの数。エンジンモードが" "「parallel」の場合は、値を 1 より大きくすることができます。" msgid "The parameters required by task, JSON blob" msgstr "タスクによって要求されるパラメーター、JSON blob" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "レジストリーサーバーへの SSL 接続で使用する証明書ファイルへのパス (存在する場" "合)。または、GLANCE_CLIENT_CERT_FILE 環境変数に CA 証明書ファイルへのファイル" "パスを設定することもできます。" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. 
Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "レジストリーサーバーへの SSL 接続で使用する認証局証明書ファイルへのパス(存在" "する場合)。または、GLANCE_CLIENT_CA_FILE 環境変数に CA 証明書ファイルへのファ" "イルパスを設定することもできます。" msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "レジストリーサーバーへの SSL 接続で使用する鍵ファイルへのパス (存在する場" "合)。または、GLANCE_CLIENT_KEY_FILE 環境変数に鍵ファイルへのファイルパスを設" "定することもできます。" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "イメージキャッシュ管理に使用される sqlite ファイルデータベースのパス。" msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "API サーバーがレジストリー要求の完了を待機する期間 (秒)。値 0 は、タイムアウ" "トなしを示します。" msgid "The port on which a pydev process is listening for connections." msgstr "pydev プロセスが接続を listen するポート。" msgid "The port on which the server will listen." msgstr "サーバーが listen するポート。" msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "レジストリーサーバーとの通信に使用されるプロトコル。http または https のいず" "れか。" #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "指定された本体 %(body)s は指定のスキーマ %(schema)s 下では無効です" msgid "The provided image is too large." msgstr "指定されたイメージが大きすぎます。" #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "指定されたパス「%(path)s」は無効です: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "新規イメージを追加するために使用するデフォルトの swift アカウント/バッキング" "ストアパラメーターへの参照。" msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." 
msgstr "" "認証サービスの領域。\"use_user_token\" が無効で、keystone 認証を使用している" "場合、領域名を指定できます。" msgid "The request returned 500 Internal Server Error." msgstr "要求で「500 Internal Server Error」が返されました。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "要求で「503 Service Unavailable」が返されました。これは一般に、サービスの過負" "荷または他の一時的な障害時に起こります。" #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "要求が「302 Multiple Choices」を返しました。これは通常、要求 URI にバージョン" "標識を含めなかったことを意味します。\n" "\n" "返された応答の本体:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求で「413 Request Entity Too Large」が返されました。これは一般に、速度制限" "または割り当て量のしきい値に違反したことを意味します。\n" "\n" "応答本体:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求で予期しない状況が返されました: %(status)s。\n" "\n" "応答本体:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "要求されたイメージは非アクティブ化されています。イメージデータのダウンロード" "は禁止されています。" msgid "The result of current task, JSON blob" msgstr "現行タスクの結果、JSON blob" msgid "The signature data was not properly encoded using base64" msgstr "" "このシグニチャーデータは base64 を使用して適切にエンコードされませんでした" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "データのサイズ %(image_size)s が制限を超えます。%(remaining)s バイト残されて" "います。" msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. 
Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "イメージのスクラブに使用されるスレッドプールのサイズ。デフォルトはシリアルス" "クラブを示す 1 です。1 より大きい値はすべて、並行してスクラブが行われるイメー" "ジの最大数を示します。" #, python-format msgid "The specified member %s could not be found" msgstr "指定されたメンバー %s は見つかりませんでした" #, python-format msgid "The specified metadata object %s could not be found" msgstr "指定されたメタデータオブジェクト %s は見つかりませんでした" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "指定されたメタデータタグ %s が見つかりませんでした" #, python-format msgid "The specified namespace %s could not be found" msgstr "指定された名前空間 %s は見つかりませんでした" #, python-format msgid "The specified property %s could not be found" msgstr "指定されたプロパティー %s は見つかりませんでした" #, python-format msgid "The specified resource type %s could not be found " msgstr "指定されたリソースタイプ %s は見つかりませんでした" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "削除されたイメージロケーションの状況は「pending_delete」または「deleted」にの" "み設定できます" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "削除されたイメージロケーションの状況は「pending_delete」または「deleted」にの" "み設定できます。" msgid "The status of this image member" msgstr "このイメージメンバーの状況" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "ストア優先順序を取得するために使用するストア名。この名前は、「stores」設定オ" "プションで定義されたストアの 1 つによって登録されていなければなりません。この" "オプションは、「location_strategy」設定オプションで定義されたイメージロケー" "ションストラテジーとして「store_type」オプションを使用する場合に適用されま" "す。" msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." 
msgstr "" "認証に使用されるストラテジー。\"use_user_token\" が無効であれば、認証ストラテ" "ジーを指定できます。" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "ターゲットメンバー %(member_id)s はイメージ %(image_id)s に既に関連付けられて" "います。" msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "管理ユーザーのテナント名。\"use_user_token\" が無効であれば、管理テナント名を" "指定できます。" msgid "The type of task represented by this content" msgstr "このコンテンツによって表されるタスクのタイプ" msgid "The unique namespace text." msgstr "固有の名前空間テキスト。" msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "この値を超えるとキャッシュプルーナー (実行されている場合) がイメージキャッ" "シュのクリーニングを開始する上限値 (累積キャッシュの最大サイズ (バイト))。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "名前空間の分かりやすい名前。存在する場合は、UI によって使用されます。" msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "Swift 認証サービスに対して認証するユーザー (非推奨)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "ソケットオプション TCP_KEEPIDLE の値。これは、TCP でキープアライブプローブの" "送信が開始される前に接続がアイドルでなければならない時間 (秒) です。" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "%(error_key_name)s %(error_filename)s に関して問題があります。確認してくださ" "い。エラー: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "%(error_key_name)s %(error_filename)s に関して問題があります。確認してくださ" "い。OpenSSL エラー: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. 
OpenSSL error %(ce)s" msgstr "" "ご使用の鍵ペアに関して問題があります。証明書 %(cert_file)s と鍵 %(key_file)s " "がペアになっていることを確認してください。OpenSSL エラー %(ce)s" msgid "There was an error configuring the client." msgstr "クライアントの設定中にエラーが発生しました。" msgid "There was an error connecting to a server" msgstr "サーバーへの接続中にエラーが発生しました" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "この設定値は、プロパティー保護ファイルで \"roles\" と \"policies\" のどちらが" "使用されるかを示します。" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "この操作は、Glance タスクでは現在許可されていません。これらのタスクは、" "expires_at プロパティーに基づき、時間に達すると自動的に削除されます。" msgid "This operation is currently not permitted on Glance images details." msgstr "この操作は、Glance イメージの詳細では現在許可されていません。" msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "この値は、イメージ location order を決定するために使用されるストラテジーを設" "定するものです。現在、Glance には、'location_order' および 'store_type' とい" "う 2 つのストラテジーがパッケージされています。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "成功または失敗の後でタスクが存続する時間 (時)" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "クライアント接続のソケット操作のタイムアウト時間。提供される接続がこの秒数の" "間アイドル状態にある場合、接続は終了します。値が '0' の場合、待機時間に制限が" "ないことを指します。" msgid "Too few arguments." msgstr "引数が少なすぎます。" msgid "Too few locations" msgstr "ロケーションが少なすぎます" msgid "Too many locations" msgstr "ロケーションが多すぎます" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "合計サイズは %(size)d バイト (イメージ数: %(img_count)d) です" msgid "Turn on/off delayed delete." 
msgstr "遅延した削除をオン/オフにします。" msgid "Type version has to be a valid semver string" msgstr "タイプバージョンは有効な semver 文字列でなければなりません" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI に複数回、スキームを指定することはできません。swift://user:pass@http://" "authurl.com/v1/container/obj のような URI を指定した場合は、次のように、swift" "+http:// スキームを使用するよう変更する必要があります。swift+http://user:" "pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "外部ストアに保持されているイメージファイルにアクセスするための URL" msgid "Unable to PUT to non-empty collection" msgstr "空でないコレクションに PUT することができません" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "pid ファイル %(pid)s を作成できません。非ルートとして実行しますか?\n" "一時ファイルにフォールバック中。次を使用して %(service)s サービスを\n" "停止できます: %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "不明な演算子 '%s' によってフィルター処理を行うことができません。" msgid "Unable to filter on a range with a non-numeric value." msgstr "非数値を含む範囲ではフィルタリングできません。" msgid "Unable to filter on a unknown operator." msgstr "不明な演算子に対してフィルター処理を行うことができません。" msgid "Unable to filter using the specified operator." msgstr "指定された演算子を使用してフィルター処理ができません。" msgid "Unable to filter using the specified range." msgstr "指定された範囲ではフィルタリングできません。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON スキーマの変更で '%s' が見つかりません" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." 
msgstr "" "JSON スキーマの変更で `op` が見つかりません。以下のいずれかでなければなりませ" "ん: %(available)s。" msgid "Unable to get legacy image" msgstr "レガシーイメージを取得できません" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "ファイル記述子制限を増加できません。非ルートとして実行しますか?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "設定ファイル %(conf_file)s から %(app_name)s をロードできません。\n" "受け取ったエラー: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "スキーマをロードできません: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s の paste 設定ファイルが見つかりません。" msgid "Unable to modify collection in immutable or readonly property" msgstr "" "不変プロパティーまたは読み取り専用プロパティー内のコレクションは変更できませ" "ん" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "以下の ID の証明書を取得できません: %s" msgid "Unable to retrieve request id from context" msgstr "コンテキストから要求 ID を取得できません" msgid "Unable to specify artifact type explicitly" msgstr "アーティファクトタイプは、明示的には指定できません" msgid "Unable to specify artifact type version explicitly" msgstr "アーティファクトタイプのバージョンは、明示的には指定できません" msgid "Unable to specify version if multiple types are possible" msgstr "複数のタイプが指定可能な場合はバージョンを指定できません" msgid "Unable to specify version if type is not specified" msgstr "タイプが指定されていない場合はバージョンを指定できません" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "イメージ %(image_id)s の重複イメージデータはアップロードできません: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "このアルゴリズムはこのシステムでサポートされないため、シグニチャーを検証でき" "ません" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "シグニチャーを検証できません: %(reason)s" msgid "Unauthorized image access" msgstr "許可されていないイメージアクセス" msgid "Unexpected body type. Expected list/dict." 
msgstr "予期しない本文タイプ。予期されたのはリストまたは辞書です。" #, python-format msgid "Unexpected response: %s" msgstr "予期しない応答: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "不明な認証ストラテジー '%s'" #, python-format msgid "Unknown command: %s" msgstr "不明なコマンド: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" msgid "Unrecognized JSON Schema draft version" msgstr "認識されない JSON スキーマのドラフトバージョン" msgid "Unrecognized changes-since value" msgstr "認識されない changes-since 値" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "サポートされない sort_dir です。許容値: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "サポートされない sort_key です。許容値: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "値 %(value)d は範囲外です。%(max)d を超えてはなりません" msgid "Value is greater than maximum" msgstr "値が最大値を超えています" msgid "Value is less than minimum" msgstr "値が最小値を下回っています" msgid "Value is required" msgstr "値が必要です" #, python-format msgid "Version component is too large (%d max)" msgstr "バージョンコンポーネントが大きすぎます (最大 %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "バージョンが無効です: %(reason)s" msgid "Virtual size of image in bytes" msgstr "イメージの仮想サイズ (バイト)" msgid "Visibility must be either \"public\" or \"private\"" msgstr "可視性は「public」または「private」のいずれかでなければなりません" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "pid %(pid)s (%(file)s) が停止するのを 15 秒間待機しましたが、停止しませんでした。中断します" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." 
msgstr "" "False の場合、available_plugins とは無関係に、成果物はロードできません。True " "の場合、成果物はロード可能です。" msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "サーバーを SSL モードで実行する場合は、cert_file オプション値と key_file オプ" "ション値の両方を設定ファイルに指定する必要があります" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "該当する場合は、このオプションによって、イメージの所有者がテナントに設定され" "ます。該当しない場合は、イメージの所有者が、要求を発行する認証ユーザーになり" "ます。" msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "レジストリーサーバーへの接続で SSL を使用する場合、認証局による検証は不要で" "す。これは、レジストリーで API に対して glanceclient を使用してコマンド行に " "--insecure を指定することと同等です。" msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "イメージスキーマが提供する内容以外のイメージプロパティーを指定することを、" "ユーザーに許可するかどうか" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "イメージプロパティーにバックエンドイメージロケーションを含めるかどうか。例え" "ば、ファイルシステムストアを使用している場合は、URL \"file:///path/to/image" "\" が \"direct_url\" メタデータフィールド内のユーザーに返されます。ストレージ" "ロケーションを公開するとセキュリティーリスクを招くおそれがあるため、この設定" "の使用には注意が必要です。これを true に設定すると show_image_direct_url オプ" "ションがオーバーライドされます。" msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" 
msgstr "" "イメージプロパティーにバックエンドイメージストレージロケーションを含めるかど" "うか。ストレージロケーションを明らかにするとセキュリティーリスクを招くおそれ" "があるため、この設定は慎重に使用してください。" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "レジストリーに対して要求を行うときに、ユーザーおよびテナントの情報が含まれた" "ヘッダーをパススルーするかどうか。ヘッダーをパススルーすると、レジストリー" "は、keystonemiddleware の auth_token ミドルウェアなしでコンテキストミドルウェ" "アを使用できるようになり、keystone 認証サービスが呼び出されなくなります。この" "オプションを使用するときは、auth_token ミドルウェア以外の方法で glance api と" "glance レジストリー間の通信が安全に行われるようにしてください。" msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "レジストリーに対して要求を行うときに、ユーザートークンをパススルーするかどう" "か。サイズの大きなファイルのアップロード中のトークンの有効期限切れに伴う障害" "を防ぐために、このパラメーターは False に設定することが推奨されま" "す。\"use_user_token\" が無効である場合は、管理者のクレデンシャルを指定できま" "す。" msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. 
Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "非同期タスク操作用の作業ディレクトリー。ここで設定されるディレクトリーはイ" "メージでの操作 (通常、イメージが宛先ストアにインポートされる前の操作) に使用" "されます。作業ディレクトリーを指定する場合は、並行タスクがスペースを使い果た" "すことなく効率的に実行されるように、十分なスペースを確保してください。" "「max_workers」数 (または実行中ワーカー数) に平均イメージサイズ(例えば " "500MB) を乗算することによって、概算を行うことができます。イメージサイズの見積" "もりは、デプロイメントでの平均サイズに基づいて行う必要があります。実行中のタ" "スクによっては、この数値に、タスクの実行内容に応じた何らかの係数を乗算する必" "要があることに注意してください。例えば、イメージ変換が有効になっている場合" "は、使用可能なサイズを 2 倍にする必要が生じることがあります。ただし、これらは" "見積もりにすぎず、最悪のシナリオを想定して見積もりを行う必要があり、見積もり" "が間違っている場合の対策を準備しておく必要があることを覚えておいてください。" #, python-format msgid "Wrong command structure: %s" msgstr "正しくないコマンド構造: %s" msgid "You are not authenticated." msgstr "認証されていません。" msgid "You are not authorized to complete this action." msgstr "このアクションの実行を許可されていません。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "イメージ %s を調べる権限がありません。" #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "イメージ %s のメンバーを調べる権限がありません。" #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "'%s' が所有する名前空間でのタグの作成は許可されていません" msgid "You are not permitted to create image members for the image." msgstr "そのイメージのイメージメンバーの作成は許可されていません。" #, python-format msgid "You are not permitted to create images owned by '%s'." 
msgstr "'%s' によって所有されているイメージの作成は許可されていません。" #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "'%s' によって所有される名前空間の作成は許可されません" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "'%s' によって所有されるオブジェクトの作成は許可されません" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "'%s' によって所有されるプロパティーの作成は許可されません" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "'%s' によって所有される resource_type の作成は許可されません" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "所有者 %s を使用してこのタスクを作成することは許可されません" msgid "You are not permitted to deactivate this image." msgstr "このイメージの非アクティブ化は許可されていません。" msgid "You are not permitted to delete this image." msgstr "このイメージの削除は許可されていません。" msgid "You are not permitted to delete this meta_resource_type." msgstr "この meta_resource_type の削除は許可されません。" msgid "You are not permitted to delete this namespace." msgstr "この名前空間の削除は許可されません。" msgid "You are not permitted to delete this object." msgstr "このオブジェクトの削除は許可されません。" msgid "You are not permitted to delete this property." msgstr "このプロパティーの削除は許可されません。" msgid "You are not permitted to delete this tag." msgstr "このタグの削除は許可されていません。" #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "この %(resource)s 上の '%(attr)s' の変更は許可されません。" #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "このイメージ上の '%s' の変更は許可されません。" msgid "You are not permitted to modify locations for this image." msgstr "このイメージのロケーションの変更は許可されていません。" msgid "You are not permitted to modify tags on this image." msgstr "このイメージ上のタグの変更は許可されていません。" msgid "You are not permitted to modify this image." msgstr "このイメージの変更は許可されていません。" msgid "You are not permitted to reactivate this image." msgstr "このイメージの再アクティブ化は許可されていません。" msgid "You are not permitted to set status on this task." 
msgstr "このタスクに関する状況を設定することは許可されません。" msgid "You are not permitted to update this namespace." msgstr "この名前空間の更新は許可されません。" msgid "You are not permitted to update this object." msgstr "このオブジェクトの更新は許可されません。" msgid "You are not permitted to update this property." msgstr "このプロパティーの更新は許可されません。" msgid "You are not permitted to update this tag." msgstr "このタグの更新は許可されていません。" msgid "You are not permitted to upload data for this image." msgstr "このイメージのデータのアップロードは許可されていません。" #, python-format msgid "You cannot add image member for %s" msgstr "%s のイメージメンバーを追加できません" #, python-format msgid "You cannot delete image member for %s" msgstr "%s のイメージメンバーを削除できません" #, python-format msgid "You cannot get image member for %s" msgstr "%s のイメージメンバーを取得できません" #, python-format msgid "You cannot update image member %s" msgstr "イメージメンバー %s を更新できません" msgid "You do not own this image" msgstr "このイメージを所有していません" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "接続時に SSL を使用するよう選択し、証明書を指定しましたが、key_file パラメー" "ターを指定しなかったか、GLANCE_CLIENT_KEY_FILE 環境変数を設定しませんでした" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "接続時に SSL を使用するよう選択し、鍵を指定しましたが、cert_file パラメーター" "を指定しなかったか、GLANCE_CLIENT_CERT_FILE 環境変数を設定しませんでした" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() で予期しないキーワード引数 '%s' が得られました" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "更新で %(current)s から %(next)s 
に移行できません (from_state=%(from)s が必" "要)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "カスタムプロパティー (%(props)s) が基本プロパティーと競合しています" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "このプラットフォームでは eventlet の「poll」ハブも「selects」ハブも使用できま" "せん" msgid "is_public must be None, True, or False" msgstr "is_public は、None、True、または False でなければなりません" msgid "limit param must be an integer" msgstr "limit パラメーターは整数でなければなりません" msgid "limit param must be positive" msgstr "limit パラメーターは正でなければなりません" #, python-format msgid "location: %s data lost" msgstr "ロケーション: %s データが失われました" msgid "md5 hash of image contents." msgstr "イメージコンテンツの MD5 ハッシュ。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() で予期しないキーワード %s が得られました" msgid "protected must be True, or False" msgstr "protected は True または False でなければなりません" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s を起動できません。受け取ったエラー: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id が長すぎます。最大サイズは %s です" glance-12.0.0/glance/locale/pt_BR/0000775000567000056710000000000012701407204017657 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000012701407204021444 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/pt_BR/LC_MESSAGES/glance-log-warning.po0000664000567000056710000001342312701407047025467 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andre Campos Bezerra , 2015 # Rodrigo Felix de Almeida , 2014 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-07-15 12:39+0000\n" "Last-Translator: openstackjenkins \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "" "%(image_id)s: field %(key)s differs (source is %(master_value)s, destination " "is %(slave_value)s)" msgstr "" "%(image_id)s: campo %(key)s difere (origem é %(master_value)s, destino é " "%(slave_value)s)" #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "Acesso negado à imagem %(id)s, mas retornando 'não localizado'" #, python-format msgid "An optional task has failed, the failure was: %s" msgstr "Uma tarefa opcional falhou, a falha foi: %s" #, python-format msgid "Artifact with id=%s is not accessible" msgstr "Artefato com id=%s não está acessível" #, python-format msgid "Artifact with id=%s not found" msgstr "Artefato com id=%s não encontrado" msgid "Attempted to modify image user did not own." msgstr "Tentou modificar o usuário da imagem não de sua propriedade." #, python-format msgid "Cached image file '%s' doesn't exist, unable to delete" msgstr "O arquivo de imagem em cache '%s' não existe; não é possível excluir" #, python-format msgid "Could not find image %s" msgstr "Não foi possível localizar a imagem %s" #, python-format msgid "" "Could not find schema properties file %s. Continuing without custom " "properties" msgstr "" "Não foi possível localizar o arquivo de propriedades de esquema %s. " "Continuando sem propriedades customizadas" #, python-format msgid "Could not find task %s" msgstr "Não foi possível localizar tarefa %s" msgid "Deadlock detected. Retrying..." 
msgstr "Conflito detectado. Tentando novamente..." #, python-format msgid "Duplicate entry for values: %s" msgstr "Entrada duplicada para valores: %s" #, python-format msgid "" "Failed to activate image %s in registry. About to delete image bits from " "store and update status to 'killed'." msgstr "" "Falha ao ativar imagem %s no registro. Prestes a excluir bits de imagem do " "armazenamento e atualizar status para 'encerrado'." #, python-format msgid "Failed to decrypt location value for image %(image_id)s" msgstr "Falha ao descriptografar valor do local para imagem %(image_id)s" #, python-format msgid "Failed to delete file %(path)s. Got error: %(e)s" msgstr "Falha ao excluir arquivo %(path)s. Erro recebido: %(e)s" #, python-format msgid "Failed to delete image %s in store from URI" msgstr "Falha ao excluir imagem %s no armazenamento da URI" #, python-format msgid "Failed to find task %(task_id)s. Reason: %(reason)s" msgstr "Falha ao encontrar tarefa %(task_id)s. Razão: %(reason)s" msgid "Failed to successfully cache all images in queue." msgstr "Falha ao armazenar em cache com êxito todas as imagens da fila." #, python-format msgid "" "Fetch of cache file failed (%(e)s), rolling back by moving " "'%(incomplete_path)s' to '%(invalid_path)s'" msgstr "" "Falha na busca do arquivo de cache (%(e)s), recuperando ao mover " "'%(incomplete_path)s' para '%(invalid_path)s'" #, python-format msgid "Forbidden to create task. Reason: %(reason)s" msgstr "Proibido criar tarefa. Razão: %(reason)s" #, python-format msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" msgstr "Proibido obter tarefa %(task_id)s. Razão: %(reason)s" msgid "Id not in sort_keys; is sort_keys unique?" msgstr "ID não em sort_keys; sort_keys é exclusivo?" #, python-format msgid "Image %s entirely missing from the destination" msgstr "Imagem %s faltando inteiramente no destino" #, python-format msgid "Image '%s' is not active. Not caching." msgstr "A imagem '%s' não está ativa. 
Sem armazenamento em cache." #, python-format msgid "" "Image cache driver '%(driver_module)s' failed to configure. Got error: " "'%(config_err)s" msgstr "" "O driver de cache de imagem '%(driver_module)s' falhou ao ser configurado. " "Erro obtido: '%(config_err)s" #, python-format msgid "" "Image cache driver '%(driver_name)s' failed to load. Got error: " "'%(import_err)s." msgstr "" "O driver de cache de imagem '%(driver_name)s' falhou ao ser carregado. Erro " "obtido: '%(import_err)s." #, python-format msgid "Invalid marker. Image %(id)s could not be found." msgstr "Marcador Inválido. Imagem %(id)s não pôde ser encontrada." #, python-format msgid "Invalid membership association specified for image %(id)s" msgstr "Associação de membro inválida especificada para a imagem %(id)s" #, python-format msgid "Member %(id)s not found" msgstr "Membro %(id)s não localizado" #, python-format msgid "No metadata found for image '%s'" msgstr "Nenhum metadado localizado para a imagem '%s'" #, python-format msgid "Task %(task_id)s failed with exception %(error)s" msgstr "Tarefa %(task_id)s falhou com exceção %(error)s" msgid "Unable to get deleted image" msgstr "Não é possível obter a imagem excluída" msgid "Unable to get unowned image" msgstr "Não é possível obter a imagem não possuída" #, python-format msgid "Unrecognised child %s" msgstr "Filho %s não reconhecido" #, python-format msgid "User lacks permission to share image %(id)s" msgstr "O usuário não tem permissão para compartilhar a imagem %(id)s" glance-12.0.0/glance/locale/pt_BR/LC_MESSAGES/glance-log-info.po0000664000567000056710000002062212701407047024754 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Andre Campos Bezerra , 2015 # Rodrigo Felix de Almeida , 2014 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b2.dev74\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-15 11:52+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-06-23 01:20+0000\n" "Last-Translator: openstackjenkins \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(task_id)s of %(task_type)s completed" msgstr "%(task_id)s de %(task_type)s completado" msgid "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." msgstr "" "'metadata_encryption_key' não foi especificado no arquivo de configuração ou " "um arquivo de configuração não foi especificado. Isso significa que essa " "migração é um NOOP." #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "Acesso negado à imagem %(id)s, mas retornando 'não localizado'" msgid "All workers have terminated. Exiting" msgstr "Todos os trabalhadores foram finalizados. Saindo" #, python-format msgid "Artifact %s has been successfully loaded" msgstr "Artefato %s foi carregado com sucesso" #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "Chamando %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgid "Caught keyboard interrupt. Exiting." msgstr "Interrupção da captura de teclado. Saindo." #, python-format msgid "Child %d exiting normally" msgstr "Filho %d saindo normalmente" #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "Limpando %s após exceder a quota" #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "Realizando limpeza %s após exceder a cota." 
#, python-format msgid "Considering: %s" msgstr "Considerando: %s" #, python-format msgid "Could not find artifact %s" msgstr "Não foi possível encontrar artefato %s" msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "Encerrando o daemon em KeyboardInterrupt" msgid "Defaulting to SQLite driver." msgstr "Padronizando para o driver SQLite." #, python-format msgid "Delete denied for public image %(id)s" msgstr "Exclusão negada para imagem pública %(id)s" #, python-format msgid "File %s loaded to database." msgstr "Arquivo %s carregado no banco de dados." #, python-format msgid "Image %(id)s not found" msgstr "Imagem %(id)s não localizada" #, python-format msgid "Image %s is being synced" msgstr "Imagem %s está sendo sincronizada" #, python-format msgid "Image %s is deactivated" msgstr "Imagem %s está desativada" #, python-format msgid "Image %s is reactivated" msgstr "Imagem %s está reativada." #, python-format msgid "Image %s metadata has changed" msgstr "Metadados da Imagem %s mudaram" #, python-format msgid "Image cache loaded driver '%s'." msgstr "O cache de imagem carregou o driver '%s'." msgid "Initialized gzip middleware" msgstr "Inicializado middleware gzip" msgid "Initialized image cache management middleware" msgstr "Middleware de gerenciamento do cache de imagem inicializado" msgid "Initialized image cache middleware" msgstr "Middleware do cache de imagem inicializado" #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "Inicializando scrubber com configuração: %s" #, python-format msgid "" "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" msgstr "" "Carregando scripts conhecidos de tarefas para task_id %(task_id)s de tipo " "%(task_type)s" msgid "Metadata loading finished" msgstr "Carregamento de metadados finalizado" #, python-format msgid "Namespace %(namespace)s saved in %(file)s" msgstr "Namespace %(namespace)s salvo em %(file)s" #, python-format msgid "Not queueing image '%s'. 
Already being written to cache" msgstr "Não enfileirando imagem %s'. Já está sendo gravada no cache" #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "Não enfileirando imagem %s'. Já armazenada em cache." #, python-format msgid "Not queueing image '%s'. Already queued." msgstr "Não enfileirando a imagem '%s'. Já enfileirada." #, python-format msgid "Overwriting namespace %s" msgstr "Sobrescrevendo namespace %s" #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "%(reaped)s %(entry_type)s entradas de cache coletadas" #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "" "Rejeitando solicitação de criação de imagem para o ID de imagem inválido " "'%(bad_id)s'" #, python-format msgid "Removed dead child %s" msgstr "Removendo filho terminado %s" #, python-format msgid "Removed invalid cache file %s" msgstr "Arquivo de cache inválido removido %s" #, python-format msgid "Removed stale child %s" msgstr "Removendo filho inativo %s" #, python-format msgid "Removed stalled cache file %s" msgstr "Arquivo de cache paralisado removido %s" #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "Retornando %(funcname)s: %(output)s" #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "Limpando imagem %(id)s dos locais %(count)d." #, python-format msgid "Skipping namespace %s. It already exists in the database." msgstr "Ignorando namespace %s. Já existe no banco de dados." 
#, python-format msgid "Started child %s" msgstr "Filho iniciado %s" #, python-format msgid "Starting %d workers" msgstr "Iniciando %d trabalhadores" #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "Iniciando Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgid "Starting single process server" msgstr "Iniciando servidor de processo único" #, python-format msgid "Storing: %s" msgstr "Armazenando: %s" #, python-format msgid "Successfully cached all %d images" msgstr "Armazenadas em cache com êxito todas as %d imagens" #, python-format msgid "Successfully created image %(id)s" msgstr "Imagem criada com êxito %(id)s" #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "Excluída com êxito uma associação da imagem %(id)s" #, python-format msgid "Successfully deleted image %(id)s" msgstr "Imagem excluída com êxito %(id)s" #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "Atualizada com êxito uma associação para a imagem %(id)s" #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "Associações atualizadas com êxito para a imagem %(id)s" #, python-format msgid "Table %s has been cleared" msgstr "Tabela %s foi limpa" #, python-format msgid "Task %(task_id)s beginning import execution." msgstr "Tarefa %(task_id)s iniciando execução da importação." 
#, python-format msgid "Task %(task_id)s: Could not import image file %(image_data)s" msgstr "" "Tarefa %(task_id)s: Não pôde importar o arquivo de imagem %(image_data)s" #, python-format msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" msgstr "" "Tarefa %(task_id)s: Obtidos dados de uri de imagem %(data_uri)s a ser " "importada" #, python-format msgid "" "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" msgstr "" "Status da tarefa [%(task_id)s] mudando de %(cur_status)s para %(new_status)s" msgid "Triggering asynchronous copy from external source" msgstr "Acionando cópia assíncrona da origem externa" msgid "Unable to get deleted image" msgstr "Não é possível obter a imagem excluída" #, python-format msgid "Update denied for public image %(id)s" msgstr "Atualização negada para imagem pública %(id)s" #, python-format msgid "Updating metadata for image %(id)s" msgstr "Atualizando metadados para a imagem %(id)s" #, python-format msgid "Uploaded data of image %s from request payload successfully." msgstr "Enviados com sucesso dados da imagem %s da requisição de carga útil." #, python-format msgid "creating table %(table)s" msgstr "criando tabela %(table)s" #, python-format msgid "dropping table %(table)s" msgstr "descartando tabela %(table)s" glance-12.0.0/glance/locale/pt_BR/LC_MESSAGES/glance.po0000664000567000056710000034360712701407051023252 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Rodrigo Felix de Almeida , 2014 # Volmar Oliveira Junior , 2013 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Carlos Marques , 2016. #zanata # Eric Baum , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev4\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-21 10:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 01:50+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s precisa ser uma sequência" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s é necessário" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s não pode ser mais longo que %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s não pode ser mais curto que %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s deve corresponder ao padrão %(pattern)s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "exceção %(cls)s foi disparada na última chamada RPC: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s não localizado na lista de membros da imagem %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) está em execução..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s parece já estar em execução: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s é registrado como um módulo duas vezes. %(module)s não está " "sendo usado." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s não foi configurado adequadamente. Não foi " "possível carregar o armazenamento de sistema de arquivos" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s não foi configurado adequadamente. Faltando o " "diretório de trabalho: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)sing %(serv)s com %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Especifique um par host:porta, em que o host é um endereço IPv4, IPv6, " "nome do host ou FQDN. Se você estiver usando um endereço IPv6, coloque-o nos " "suportes separadamente da porta (ou seja, \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s não pode conter caracteres de unicode de 4 bytes." #, python-format msgid "%s is already stopped" msgstr "%s já está parado" #, python-format msgid "%s is stopped" msgstr "%s está parado" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "Valor '%(param)s' fora da faixa; não deve exceder %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "opção --os_auth_url ou variável de ambiente OS_AUTH_URL requerida quando " "estratégia de autenticação keystone está ativada\n" msgid "A body is not expected with this request." msgstr "Um corpo não é esperado com essa solicitação." msgid "" "A list of artifacts that are allowed in the format name or name-version. 
" "Empty list means that any artifact can be loaded." msgstr "" "Uma lista de artefatos que são permtiidos no nome do formato ou na versão do " "nome. Uma lista vazia significa que qualquer artefato pode ser carregado." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Um objeto de definição de metadados com o nome=%(object_name)s já existe no " "namespace=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Uma propriedade de definição de metadados com o nome=%(property_name)s já " "existe no namespace=%(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Um tipo de recurso de definição de metadados com o nome=" "%(resource_type_name)s já existe." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Uma identificação de metadados com o nome=%(name)s já existe no namespace=" "%(namespace_name)s." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Um conjunto de URLs para acessar o arquivo de imagem mantido em " "armazenamento externo" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "A chave AES para criptografar metadados 'location' de armazenamento. Isso " "inclui, se usado, Credenciais Swift ou S3. Deve ser configurado para uma " "sequência aleatória de comprimento de 16, 24 ou 32 bytes" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Endereço para associar o servidor. Útil quando selecionando uma interface " "de rede particular." 
msgid "Address to find the registry server." msgstr "Endereço para encontrar o servidor de registro." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Permite usuários não autenticados acessarem a API com privilégios de somente " "leitura. Isto só se aplica quando utilizando ContextMiddleware." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "Os valores permitidos %s são inválidos nos validadores fornecidos" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Quantidade de espaço em disco (em GB) necessária para a imagem de " "inicialização." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantidade de ram (em MB) necessária para a imagem de inicialização." msgid "An identifier for the image" msgstr "Um identificador para a imagem" msgid "An identifier for the image member (tenantId)" msgstr "Um identificador para o membro de imagem (tenantId)" msgid "An identifier for the owner of this task" msgstr "Um identificador para o proprietário desta tarefa" msgid "An identifier for the task" msgstr "Um identificador para a tarefa" msgid "An image file url" msgstr "Uma URL de arquivo de imagem" msgid "An image schema url" msgstr "Uma URL de esquema de imagem" msgid "An image self url" msgstr "Uma URL automática de imagem" #, python-format msgid "An image with identifier %s already exists" msgstr "Uma imagem com o identificador %s já existe" msgid "An import task exception occurred" msgstr "Ocorreu uma exceção em uma tarefa importante" msgid "An object with the same identifier already exists." msgstr "Um objeto com o mesmo identificador já existe." msgid "An object with the same identifier is currently being operated on." msgstr "Um objeto com o mesmo identificador está atualmente sendo operado." msgid "An object with the specified identifier was not found." 
msgstr "Um objeto com o identificador especificado não foi localizado." msgid "An unknown exception occurred" msgstr "Ocorreu uma exceção desconhecida" msgid "An unknown task exception occurred" msgstr "Ocorreu uma exceção de tarefa desconhecida" #, python-format msgid "Array has no element at position %d" msgstr "A matriz não tem nenhum elemento na posição %d" msgid "Array property can't have item_type=Array" msgstr "A propriedade matriz não pode ter item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "O artefato %s não pôde ser excluído porque está em uso: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "O artefato não pode alterar o estado de %(source)s para %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "O artefato excede a cota de armazenamento: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "O artefato não possui a propriedade %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "O estado do artefato não pode ser alterado de %(curr)s para %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "A mídia de armazenamento de artefato está cheia: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" "O tipo de artefato com nome '%(name)s' e versão '%(version)s' não é conhecido" msgid "Artifact with a circular dependency can not be created" msgstr "O artefato com uma dependência circular não pode ser criado" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "O artefato com ID=%(id)s não é acessível" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "O artefato com ID=%(id)s não foi localizado" msgid "Artifact with the specified type, name and version already exists" msgstr "O artefato com o tipo, o nome e a versão especificados já existe" 
#, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "O artefato com o tipo, o nome e a versão especificados já possui a " "dependência direta=%(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "O artefato com o tipo, o nome e a versão especificados já possui a " "dependência transitiva=%(dep)s" msgid "Attempt to set readonly property" msgstr "Tentativa de configurar a propriedade somente leitura" msgid "Attempt to set value of immutable property" msgstr "Tentativa de configurar o valor da propriedade imutável" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Tentativa de fazer upload de imagem duplicada: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Tentativa de atualizar o campo Local para uma imagem não está no status em " "fila." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "O atributo '%(property)s' é somente leitura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "O atributo '%(property)s' é reservado." #, python-format msgid "Attribute '%s' is read-only." msgstr "Atributo '%s' é apenas leitura." #, python-format msgid "Attribute '%s' is reserved." msgstr "Atributo '%s' é reservado." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "Atributo container_format pode ser apenas substituído por uma imagem na fila." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "Atributo disk_format pode ser apenas substituído por uma imagem na fila." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" "A chave de autenticação do usuário autenticado com relação ao serviço de " "autenticação Swift. 
(descontinuado)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Serviço de autenticação na URL %(url)s não localizado." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Erro de autenticação - o token pode ter expirado durante o envio do arquivo. " "Removendo dados da imagem %s." msgid "Authorization failed." msgstr "Falha de autorização." msgid "Available categories:" msgstr "Categorias disponíveis:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato de filtro de consulta \"%s\" inválido. Use a notação ISO 8601 " "DateTime." #, python-format msgid "Bad Command: %s" msgstr "Comandos inválidos: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Cabeçalho inválido: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Valor inválido passado para o filtro %(filter)s obteve %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "URI S3 malformado: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Credenciais malformadas '%(creds)s' no URI Swift" msgid "Badly formed credentials in Swift URI." msgstr "Credenciais malformadas no URI Swift." msgid "Base directory that the image cache uses." msgstr "Diretório base que o cache de imagens utiliza." msgid "BinaryObject property cannot be declared mutable" msgstr "A propriedade BinaryObject não pode ser declarada mutável" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "O BLOB %(name)s não pode ter diversos valores" msgid "Blob size is not set" msgstr "O tamanho do BLOB não está configurado" msgid "Body expected in request." msgstr "Corpo esperado na solicitação." 
msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "" "O arquivo e o legacy_image_id não podem ser especificados ao mesmo tempo" msgid "CA certificate file to use to verify connecting clients." msgstr "" "Arquivo de certificado CA a ser utilizado para verificar os clientes que se " "conectam." msgid "Cannot be a negative value" msgstr "Não pode ser um valor negativo" msgid "Cannot be a negative value." msgstr "Não pode ser um valor negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Não é possível converter a imagem %(key)s '%(value)s' para um número inteiro." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "Não é possível declarar a propriedade de artefato com o nome reservado " "'metadata'" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "Não é possível carregar o artefato '%(name)s'" msgid "Cannot remove last location in the image." msgstr "Não é possível remover o último local na imagem." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Não é possível salvar os dados da imagem %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Não é possível configurar locais para esvaziar a lista." msgid "Cannot specify 'max_size' explicitly" msgstr "Não é possível especificar 'max_size' explicitamente" msgid "Cannot specify 'min_size' explicitly" msgstr "Não é possível especificar 'min_size' explicitamente" msgid "Cannot upload to an unqueued image" msgstr "Não é possível fazer upload para uma imagem fora da fila" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "Não é possível usar este parâmetro com o operador %(op)s" msgid "Certificate file to use when starting API server securely." msgstr "" "Arquivo de certificado a ser utilizado quando iniciar o servidor de API de " "forma segura." 
#, python-format msgid "Certificate format not supported: %s" msgstr "Formato de certificado não suportado: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "O certificado não é válido após: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "O certificado não é válido antes de: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "A soma de verificação falhou. Interrompido o armazenamento em cache da " "imagem '%s'." msgid "Client disconnected before sending all data to backend" msgstr "Cliente desconectado antes de enviar todos os dados para o backend" msgid "Command not found" msgstr "Comando não encontrado" msgid "Configuration option was not valid" msgstr "A opção de configuração não era válida" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Erro de conexão/solicitação inválida para serviço de autenticação na URL " "%(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construída: %s" msgid "Container format is not specified." msgstr "O formato de contêiner não foi especificado." msgid "Content-Type must be application/octet-stream" msgstr "Tipo de Conteúdo deve ser application/octet-stream" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Download de imagem corrompido para a imagem %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Não foi possível ligar a %(host)s:%(port)s depois de tentar por 30 segundos" msgid "Could not find OVF file in OVA archive file." msgstr "Não foi possível localizar o arquivo OVF no archive OVA." 
#, python-format msgid "Could not find metadata object %s" msgstr "Não foi possível localizar o objeto de metadados %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Não foi possível localizar a identificação de metadados %s" #, python-format msgid "Could not find namespace %s" msgstr "Não foi possível localizar o namespace %s" #, python-format msgid "Could not find property %s" msgstr "Não é possível localizar a propriedade %s" msgid "Could not find required configuration option" msgstr "Não foi possível localizar a opção de configuração necessária" #, python-format msgid "Could not find task %s" msgstr "Não foi possível localizar tarefa %s" #, python-format msgid "Could not update image: %s" msgstr "Não foi possível atualizar a imagem: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Atualmente, os pacotes OVA que contêm diversos discos não são suportados. " msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "A lista de validadores customizados deve conter tuplas '(função, mensagem)'" #, python-format msgid "Data for image_id not found: %s" msgstr "Dados de image_id não localizados: %s" msgid "Data supplied was not valid." msgstr "Os dados fornecidos não eram válidos." 
msgid "Date and time of image member creation" msgstr "Data e hora da criação de membro da imagem" msgid "Date and time of image registration" msgstr "Data e hora do registro da imagem " msgid "Date and time of last modification of image member" msgstr "Data e hora da última modificação de membro da imagem" msgid "Date and time of namespace creation" msgstr "Data e hora da criação do namespace" msgid "Date and time of object creation" msgstr "Data e hora da criação do objeto" msgid "Date and time of resource type association" msgstr "Data e hora da associação do tipo de recurso " msgid "Date and time of tag creation" msgstr "Data e hora da criação da identificação " msgid "Date and time of the last image modification" msgstr "Data e hora da última modificação da imagem " msgid "Date and time of the last namespace modification" msgstr "Data e hora da última modificação do namespace " msgid "Date and time of the last object modification" msgstr "Data e hora da última modificação do objeto" msgid "Date and time of the last resource type association modification" msgstr "Data e hora da última modificação de associação de tipo de recurso " msgid "Date and time of the last tag modification" msgstr "Data e hora da última modificação da identificação " msgid "Datetime when this resource was created" msgstr "Data/hora quando este recurso foi criado" msgid "Datetime when this resource was updated" msgstr "Data/Hora quando este recurso foi atualizado" msgid "Datetime when this resource would be subject to removal" msgstr "Data/Hora quando este recurso deve ser objeto de remoção" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Valor padrão para o número de itens retornados por uma solicitação se não " "especificado explicitamente na solicitação" msgid "Default value is invalid" msgstr "O valor padrão é inválido" #, python-format msgid "Denying attempt to upload artifact because it exceeds the 
quota: %s" msgstr "" "Negando a tentativa de fazer upload de artefato porque ele excede a cota: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "Negando a tentativa de upload da imagem porque ela excede a cota: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "Negando tentativa de fazer upload de imagem maior que %d bytes." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "A propriedade de dependência '%s' deve ser excluída primeiro" msgid "Dependency relations cannot be mutable" msgstr "As relações de dependência não podem ser mutáveis" msgid "Deploy the v1 OpenStack Images API." msgstr "Implemente a API de Imagens OpenStack v1." msgid "Deploy the v1 OpenStack Registry API." msgstr "Implemente a API Registry OpenStack v1." msgid "Deploy the v2 OpenStack Images API." msgstr "Implemente a API de Imagens OpenStack v2." msgid "Deploy the v2 OpenStack Registry API." msgstr "Implemente a API Registry OpenStack v2." msgid "Descriptive name for the image" msgstr "Nome descritivo para a imagem" msgid "Dictionary contains unexpected key(s)" msgstr "O dicionário contém chave(s) inesperada(s)" msgid "Dictionary size is greater than maximum" msgstr "O tamanho do dicionário é maior que o máximo" msgid "Dictionary size is less than minimum" msgstr "O tamanho do dicionário é menor que o mínimo" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "O algoritmo de compilação que será usado para assinatura digital. Use o " "comando \"openssl list-message-digest-algorithms\" para obter os algoritmos " "disponíveis suportados pela versão do OpenSSL na plataforma. Exemplos são " "\"sha1\", \"sha256\", \"sha512\", etc." 
msgid "Disk format is not specified." msgstr "O formato de disco não foi especificado." msgid "Does not match pattern" msgstr "Não corresponde ao padrão" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "O driver %(driver_name)s não pôde ser configurado corretamente. Motivo: " "%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Um arquivo ou um legacy_image_id precisa ser especificado" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Erro ao decodificar sua solicitação. A URL ou o corpo da solicitação " "continha caracteres que não puderam ser decodificados pelo Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "Erro ao buscar membros da imagem %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Erro na configuração do armazenamento. A inclusão de artefatos no " "armazenamento está desativada." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Erro na configuração do armazenamento. A inclusão de imagens para " "armazenamento está desativada." msgid "Error occurred while creating the verifier" msgstr "Ocorreu um erro ao criar o verificador." 
msgid "Error occurred while verifying the signature" msgstr "Ocorreu um erro ao verificar a assinatura" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "O membro era esperado no formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "O estado era esperado no formato: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "A fonte externa não deve estar vazia" #, python-format msgid "External sources are not supported: '%s'" msgstr "As fontes externas não são suportadas: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Falha ao ativar imagem. Erro obtido: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Falha ao incluir metadados da imagem. Erro obtido: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Falha ao localizar artefato %(artifact_id)s a ser excluído" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Falhar ao localizar a imagem %(image_id)s para excluir" #, python-format msgid "Failed to find image to delete: %s" msgstr "Falha ao encontrar imagem para excluir: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Falha ao encontrar imagem para atualizar: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Falha ao localizar o tipo de recurso %(resourcetype)s para excluir" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Falha ao inicializar o banco de dados de cache da imagem. Erro obtido: %s" #, python-format msgid "Failed to read %s from config" msgstr "Falha ao ler %s da configuração" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "Falha ao reservar imagem. Erro obtido: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "Falha ao atualizar metadados da imagem. 
Erro obtido: %s" #, python-format msgid "Failed to upload image %s" msgstr "Falha ao enviar imagem %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " "a erro de HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Falha ao fazer upload dos dados de imagem para a imagem %(image_id)s devido " "a erro interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "O arquivo %(path)s tem arquivo de backup inválido %(bfile)s, interrompendo." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Importações baseadas em arquivo não são permitidas. Use uma fonte não local " "de dados de imagem." msgid "File too large" msgstr "Arquivo muito grande" msgid "File too small" msgstr "Arquivo muito pequeno" msgid "Forbidden image access" msgstr "Proibido o acesso a imagem" #, python-format msgid "Forbidden to delete a %s image." msgstr "Proibido excluir uma imagem %s." #, python-format msgid "Forbidden to delete image: %s" msgstr "Proibido excluir imagem: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Proibido modificar '%(key)s' da imagem %(status)s" #, python-format msgid "Forbidden to modify '%s' of image." msgstr "Proibido modificar '%s' de imagem." msgid "Forbidden to reserve image." msgstr "Proibido reservar imagem." msgid "Forbidden to update deleted image." msgstr "Proibido atualizar imagem excluída." #, python-format msgid "Forbidden to update image: %s" msgstr "Proibido atualizar imagem: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Tentativa de upload proibida: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." 
msgstr "" "Proibindo solicitação, o namespace de definição de metadados=%s não é " "visível." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Proibindo solicitação, a tarefa %s não está visível" msgid "Format of the container" msgstr "Formato do contêiner" msgid "Format of the disk" msgstr "Formato do disco" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "Falha ao obter dados de blob %(name)s: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Falha ao obter dados de imagem %(id)s: %(err)s." msgid "Glance client not installed" msgstr "Cliente Glance não instalado" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" não é válido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host e porta \"%s\" não são válidos." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Mensagem informativa legível apenas incluída quando apropriado (geralmente " "em falha)" msgid "If False doesn't trace SQL requests." msgstr "Se Falso não rastreia as requisições SQL." msgid "If False fully disable profiling feature." msgstr "Se Falso desabilita completamente a funcionalidade de profilling." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Se False, o servidor retornará o cabeçalho \"Connection: close\", Se True, o " "servidor irá retornar \"Connection: Keep-Alive\" em suas respostas. 
Para " "pedir para fechar a conexão do soquete do cliente explicitamente depois que " "a resposta for enviada e lida com êxito pelo cliente, você simplesmente terá " "que configurar esta opção como False ao criar um servidor wsgi." msgid "If true, image will not be deletable." msgstr "Se true, a imagem não será excluível." msgid "If true, namespace will not be deletable." msgstr "Se verdadeiro, o namespace não poderá ser excluído." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "A imagem %(id)s não pôde ser excluída, pois ela está sendo usada: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "Imagem %(id)s não localizada" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Imagem %(image_id)s não pôde ser localizada após o upload. A imagem pode ter " "sido excluída durante o upload: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "A imagem %(image_id)s está protegida e não pode ser excluída." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "A imagem %s não pôde ser localizada após o upload. A imagem pode ter sido " "excluída durante o upload, limpando os chunks transferidos por upload." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "" "A imagem %s não foi encontrada após o envio. A imagem pode ter sido removida " "durante o envio." #, python-format msgid "Image %s is deactivated" msgstr "Imagem %s está desativada" #, python-format msgid "Image %s is not active" msgstr "A imagem %s não está ativa" #, python-format msgid "Image %s not found." msgstr "Imagem %s não localizada." 
#, python-format msgid "Image exceeds the storage quota: %s" msgstr "Imagem excede a cota de armazenamento: %s" msgid "Image id is required." msgstr "ID da imagem é obrigatório." msgid "Image is protected" msgstr "A imagem está protegida" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "O limite do membro da imagem excedido para imagem %(id)s: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "Nome da imagem muito longo: %d" msgid "Image operation conflicts" msgstr "Conflitos da operação de imagem" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Transição de status de imagem de %(cur_status)s para %(new_status)s não é " "permitido" #, python-format msgid "Image storage media is full: %s" msgstr "A mídia de armazenamento da imagem está cheia: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "" "O limite de identificação da imagem excedeu para a imagem %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problema ao fazer upload de imagem: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "A imagem com o identificador %s já existe!" #, python-format msgid "Image with identifier %s has been deleted." msgstr "Imagem com identificador %s foi excluída." 
#, python-format msgid "Image with identifier %s not found" msgstr "Imagem com identificador %s não localizada" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Imagem com o ID fornecido %(image_id)s não foi localizada" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Estratégia de autorização incorreta; esperava-se \"%(expected)s\", mas foi " "recebido \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Requisição incorreta: %s" msgid "Index is out of range" msgstr "O índice está fora do intervalo" msgid "Index is required" msgstr "O índice é obrigatório" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "A entrada não contém o campo '%(key)s'" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "Permissões insuficientes na mídia de armazenamento de artefato: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Permissões insuficientes na mídia de armazenamento da imagem: %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Content-Type inválido para trabalhar com %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Ponteiro de JSON inválido para este recurso: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Formato de certificado inválido: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "Soma de verificação inválida '%s': não pode exceder 32 caracteres" msgid "Invalid configuration in glance-swift conf file." msgstr "Configuração inválida no arquivo de configuração glance-swift." msgid "Invalid configuration in property protection file." msgstr "Configuração inválida no arquivo de proteção de propriedade." #, python-format msgid "Invalid container format '%s' for image." msgstr "Formato de contêiner inválido '%s' para imagem." 
#, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo de conteúdo inválido %(content_type)s" msgid "Invalid dict property type" msgstr "Tipo de propriedade de dicionário inválida" msgid "Invalid dict property type specification" msgstr "Especificação de tipo de propriedade de dicionário inválida" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Formato de disco inválido '%s' para imagem." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valor de filtro inválido %s. A aspa não está fechada." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valor de filtro inválido %s. Não há nenhuma vírgula depois da aspa de " "fechamento." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valor de filtro inválido %s. Não há nenhuma vírgula antes da aspa de abertura." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "Cabeçalhos \"Content-Type\" inválidos: %s" msgid "Invalid image id format" msgstr "Formato de ID da imagem inválido" msgid "Invalid item type specification" msgstr "Especificação de tipo de item inválida" #, python-format msgid "Invalid json body: %s" msgstr "Corpo json inválido: %s" msgid "Invalid jsonpatch request" msgstr "Solicitação de jsonpatch inválida" msgid "Invalid location" msgstr "Local inválido" #, python-format msgid "Invalid location %s" msgstr "Local inválido %s" #, python-format msgid "Invalid location: %s" msgstr "Localidade inválida: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Opção location_strategy inválida: %(name)s. 
A(s) opção(ões) de estratégia(s) " "válida(s) é(são): %(strategies)s" msgid "Invalid locations" msgstr "Locais inválidos" #, python-format msgid "Invalid locations: %s" msgstr "Localidades inválidas: %s" msgid "Invalid marker format" msgstr "Formato de marcador inválido" msgid "Invalid marker. Image could not be found." msgstr "Marcador inválido. A imagem não pôde ser localizada." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "mask_gen_algorithm inválido: %s" #, python-format msgid "Invalid membership association: %s" msgstr "Associação inválida: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Combinação inválida de formatos de disco e contêiner. Ao configurar um " "formato de disco ou contêiner para um destes, 'aki', 'ari' ou 'ami', os " "formatos de contêiner e disco devem corresponder." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operação inválida: `%(op)s`. Ela deve ser um das seguintes: %(available)s." msgid "Invalid position for adding a location." msgstr "Posição inválida para adicionar uma localidade." msgid "Invalid position for removing a location." msgstr "Posição inválida para remover uma localidade." 
msgid "Invalid property definition" msgstr "Definição de propriedade inválida" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "pss_salt_length inválido: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Tipo de chave pública inválido para o tipo de chave de assinatura: %s" msgid "Invalid reference list specification" msgstr "Especificação de lista de referência inválida" msgid "Invalid referenced type" msgstr "Tipo referenciado inválido" msgid "Invalid request PATCH for work with blob" msgstr "PATCH de solicitação inválido para trabalhar com blob" msgid "Invalid service catalog json." msgstr "Catálogo de serviço json inválido." #, python-format msgid "Invalid signature hash method: %s" msgstr "Método de hash de assinatura inválido: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Tipo de chave de assinatura inválido: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Direção de classificação inválida: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Chave de classificação inválida: %(sort_key)s. Se a versão do tipo não " "estiver configurada, ela deverá ser uma das seguintes: %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Chave de classificação inválida: %(sort_key)s. Deve ser um dos seguintes: " "%(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Chave de classificação inválida: %(sort_key)s. Não é possível classificar " "por esta propriedade" #, python-format msgid "Invalid status value: %s" msgstr "Valro de status inválido: %s" #, python-format msgid "Invalid status: %s" msgstr "Status inválido: %s" #, python-format msgid "Invalid time format for %s." 
msgstr "Formato de horário inválido para %s." msgid "Invalid type definition" msgstr "Definição de tipo inválida" #, python-format msgid "Invalid type value: %s" msgstr "Valor de tipo inválido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Atualização inválida. Ela resultaria em um namespace de definição de " "metadados duplicado com o mesmo nome de %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em um objeto de definição de metadados " "duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em um objeto de definição de metadados " "duplicado com o mesmo nome=%(name)s no namespace=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Atualização inválida. Ela resultaria em uma propriedade de definição de " "metadados duplicada com o mesmo nome=%(name)s no namespace=" "%(namespace_name)s." 
#, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Valor inválido '%(value)s' para o parâmetro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valor inválido para a opção %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valor de visibilidade inválido: %s" msgid "Is not allowed value" msgstr "Não é um valor permitido" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Parece que o módulo eventlet foi importado antes de configurar %s='yes'. " "Atualmente é necessário desativar eventlet.greendns se estiver usando ipv6, " "visto que o eventlet.greendns atualmente divide com endereços ipv6. Assegure-" "se de que eventlet não seja importado antes que este esteja sendo " "configurado." msgid "It's invalid to provide multiple image sources." msgstr "é inválido fornecer múltiplas fontes de imagens." msgid "It's not allowed to add locations if locations are invisible." msgstr "Não é permitido adicionar locais se os locais forem invisíveis." msgid "It's not allowed to remove locations if locations are invisible." msgstr "Não é permitido remover locais se os locais forem invisíveis." msgid "It's not allowed to update locations if locations are invisible." msgstr "Não é permitido atualizar locais se os locais forem invisíveis." msgid "Items have to be unique" msgstr "Os itens precisam ser exclusivos" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." msgstr "" "O caminho Json deve iniciar com uma '/', não deve terminar com uma '/' e " "duas '/' subsequentes não são permitidas." 
msgid "Legacy image was not found" msgstr "A imagem anterior não foi localizada" msgid "Length is greater than maximum" msgstr "O comprimento é maior que o máximo" msgid "Length is less than minimum" msgstr "O comprimento é menor que o mínimo" msgid "Limit param must be an integer" msgstr "O parâmetro limite deve ser um número inteiro" msgid "Limit param must be positive" msgstr "O parâmetro limite deve ser positivo" #, python-format msgid "Limit param must not be higher than %d" msgstr "O parâmetro limite não deve ser maior que %d" msgid "Limits request ID length." msgstr "Limita o comprimento do ID da solicitação." msgid "List definitions may hot have defaults" msgstr "As definições de lista podem não ter padrões" msgid "List of strings related to the image" msgstr "Lista de sequências relacionadas à imagem" msgid "List size is greater than maximum" msgstr "O tamanho da lista é maior que o máximo" msgid "List size is less than minimum" msgstr "O tamanho da lista é menor que o mínimo" msgid "Loop time between checking for new items to schedule for delete." msgstr "" "Tempo de loop entre a verificação de novos itens para planejamento da " "exclusão." #, python-format msgid "Malformed Content-Range header: %s" msgstr "Cabeçalho de intervalo de conteúdo malformado: %s" msgid "Malformed JSON in request body." msgstr "JSON malformado no corpo da solicitação." msgid "Max string length may not exceed 255 characters" msgstr "O comprimento máximo de sequência não pode exceder 255 caracteres" msgid "Maximal age is count of days since epoch." msgstr "A idade máxima é a contagem de dias desde a época." msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "Tamanho máximo da linha de cabeçalhos da mensagem a ser aceito. 
" "max_header_line pode precisar ser aumentada ao utilizar tokens grandes " "(geralmente aqueles gerados pela API Keystone v3 com catálogos de serviço " "grandes" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de membros da imagem por imagem. Valores negativos significam " "ilimitado." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de localizações permitidas em uma imagem. Valores negativos " "significam ilimitado." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Número máximo de propriedades permitidas em uma imagem. Valores negativos " "significam ilimitado." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Número máximo de tags permitidas em uma imagem. Valores negativos significam " "ilimitado." msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Número máximo permitido de itens que puderam ser retornados por uma " "solicitação" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "O máximo de redirecionamentos (%(redirects)s) foi excedido." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "Tamanho máximo de imagem que um usuário pode transferir por upload em bytes. " "O padrão é definido como 1099511627776 bytes (1 TB). AVISO: este valor só " "deve ser aumentado após uma consideração cuidadosa e deve ser configurado " "para um valor inferior a 8 EB (9223372036854775808)." 
#, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "O membro %(member_id)s é duplicado para a imagem %(image_id)s" msgid "Member can't be empty" msgstr "Membro não pode ser vazio" msgid "Member to be added not specified" msgstr "Membro a ser incluído não especificado" msgid "Membership could not be found." msgstr "Associação não pôde ser localizada." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "O namespace de definição de metadados %(namespace)s é protegido e não pode " "ser excluído." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Namespace de definição de metadados não localizado para o id=%s" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "O namespace=%(namespace_name)s de definição de metadados não foi localizado." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "O objeto de definição de metadados %(object_name)s é protegido e não pode " "ser excluído." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Objeto de definição de metadados não localizado para o id=%s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "A propriedade de definição de metadados %(property_name)s é protegida e não " "pode ser excluída." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Propriedade de definição de metadados não localizada para id=%s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "A definição de metadados resource-type %(resource_type_name)s é um tipo de " "sistema com valor semente e não pode ser excluída." 
#, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "A definição de metadados resource-type-association %(resource_type)s é " "protegida e não poderá ser excluída." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "A identificação da definição de metadados %(tag_name)s é protegida e não " "pode ser excluída." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Identificação de definição de metadados não localizada para o id=%s" msgid "Min string length may not be negative" msgstr "O comprimento mín. de sequência não pode ser negativo" msgid "Minimal rows limit is 1." msgstr "O limite mínimo de linhas é 1." #, python-format msgid "Missing required credential: %(required)s" msgstr "Credencial necessária ausente: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Diversas correspondências do serviço de 'imagem' para a região %(region)s. " "Isso geralmente significa que uma região é necessária e você não a forneceu." msgid "Must supply a positive, non-zero value for age." msgstr "Deve-se fornecer um valor positivo diferente de zero para idade." msgid "Name of the paste configuration file." msgstr "Nome do arquivo de cola de configuração." 
#, python-format msgid "No artifact found with ID %s" msgstr "Nenhum artefato localizado com o ID %s" msgid "No authenticated user" msgstr "Usuário não autenticado" #, python-format msgid "No image found with ID %s" msgstr "Nenhuma imagem encontrada com o ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Nenhum local localizado com o ID %(loc)s da imagem %(img)s" msgid "No permission to share that image" msgstr "Nenhuma permissão para compartilhar essa imagem" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "Nenhum plug-in para '%(name)s' foi carregado" msgid "No property to access" msgstr "Nenhuma propriedade a ser acessada" #, python-format msgid "No such key '%s' in a dict" msgstr "Não existe a chave '%s' em um dicionário" #, python-format msgid "Not a blob property '%s'" msgstr "Não é uma propriedade blob '%s'" msgid "Not a downloadable entity" msgstr "Não é uma entidade transferível por download" msgid "Not a list property" msgstr "Não é uma propriedade de lista" #, python-format msgid "Not a list property '%s'" msgstr "Não é uma propriedade de lista '%s'" msgid "Not a valid value type" msgstr "Não é um tipo de valor válido" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Nem todas as dependências estão no estado '%s'" #, python-format msgid "Not allowed to create members for image %s." msgstr "Não é permitido criar membros para a imagem %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Não é permitido desativar a imagem no status '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Não é permitido excluir membros para a imagem %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Não é permitido excluir identificações para a imagem %s." #, python-format msgid "Not allowed to list members for image %s." msgstr "Não é permitido listar os membros para a imagem %s." 
#, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Não é permitido reativar a imagem no status '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Não é permitido atualizar os membros para a imagem %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Não é permitido atualizar as identificações para a imagem %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Não é permitido fazer upload de dados de imagem para a imagem %(image_id)s: " "%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "Não é um idx de matriz '%s'" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "O número de diretórios de classificação não corresponde ao número de chaves " "de classificação" msgid "OVA extract is limited to admin" msgstr "O extrato de OVA é limitado para administrador" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "Os metadados OVF de interesse não foram especificados no arquivo de " "configuração ovf.metadata.json. Configure \"cim_pasd\" para uma lista de " "propriedades CIM_ProcessorAllocationSettingData de interesse." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "O arquivo de configuração de propriedades OVF \"ovf-metadata.json\" não foi " "localizado." msgid "Old and new sorting syntax cannot be combined" msgstr "A sintaxe de classificação nova e antiga não podem ser combinadas" msgid "Only list indexes are allowed for blob lists" msgstr "Somente índices de lista são permitidos para listas de blob" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "A operação \"%s\" requer um membro denominado \"valor\"." 
msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Objetos de operação devem conter exatamente um membro denominado \"incluir" "\", \"remover\" ou \"substituir\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Objetos de operação devem conter apenas um membro denominado \"incluir\", " "\"remover\" ou \"substituir\"." msgid "Operations must be JSON objects." msgstr "As operações devem ser objetos JSON." #, python-format msgid "Operator %(op)s is not supported" msgstr "O operador %(op)s não é suportado" #, python-format msgid "Original locations is not empty: %s" msgstr "Localidade original não está vazia: %s" msgid "Owner can't be updated by non admin." msgstr "O proprietário não pode ser atualizado por um não administrador." msgid "Owner must be specified to create a tag." msgstr "O proprietário deve ser especificado para criar uma identificação." msgid "Owner of the image" msgstr "Proprietário da imagem" msgid "Owner of the namespace." msgstr "Proprietário do namespace." msgid "Param values can't contain 4 byte unicode." msgstr "Valores de parâmetro não podem conter unicode de 4 bytes." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Nome parcial de um pipeline no seu arquivo de cola de configuração com o " "nome de serviço removido. 
Por exemplo se sua seção de cola é [pipeline:" "glance-api-keystone] utilize o valor \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "" "O caminho ao diretório em que os arquivos de metadados json estão armazenados" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "O nome do plug-in '%(plugin)s' deve corresponder ao nome do tipo de artefato " "'%(name)s'" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "O ponteiro `%s` contém \"~\" não parte de uma sequência de escape " "reconhecida." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "O ponteiro `%s` contém uma \"/\" adjacente." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "O ponteiro `%s` não contém um token válido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "O ponteiro `%s` não começa com \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "O ponteiro `%s` termina com \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "O ponteiro contém '~', que não faz parte de uma sequência de escape " "reconhecida [~0, ~1]." #, python-format msgid "Port \"%s\" is not valid." msgstr "Porta \"%s\" não é válida." msgid "Port the registry server is listening on." msgstr "A porta em que o servidor de registro está escutando." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "" "O componente numérico pré-liberação é muito extenso (%d caracteres máx.)" msgid "Private key file to use when starting API server securely." msgstr "" "Chave privada a ser utilizada quando iniciar o servidor de API de forma " "segura." #, python-format msgid "Process %d not running" msgstr "O processo %d não está em execução" #, python-format msgid "Properties %s must be set prior to saving data." 
msgstr "As propriedades %s devem ser configuradas antes de salvar os dados." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "A propriedade %(property_name)s não começa com o prefixo de associação do " "tipo de recurso esperado de ‘%(prefix)s‘." #, python-format msgid "Property %s already present." msgstr "Propriedade %s já presente." #, python-format msgid "Property %s does not exist." msgstr "A propriedade %s não existe." #, python-format msgid "Property %s may not be removed." msgstr "A propriedade %s pode não ser removida." #, python-format msgid "Property %s must be set prior to saving data." msgstr "A propriedade %s deve ser configurada antes de salvar os dados." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "A propriedade '%(name)s' pode não ter o valor '%(val)s': %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "Propriedade '%s' é protegida" msgid "Property names can't contain 4 byte unicode." msgstr "Os nomes de propriedade não podem conter unicode de 4 bytes." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "Proteção de propriedade na operação %(operation)s para a regra %(rule)s não " "localizada. Nenhuma função será permitida para executar esta operação." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "O valor %(prop)s da propriedade não foi localizado" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "O tamanho da imagem fornecida deve corresponder ao tamanho da imagem " "armazenada. 
(tamanho fornecido: %(ps)d, tamanho armazenado:%(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" "O objeto fornecido não corresponde ao esquema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Status de tarefa fornecido não é suportado: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Tipo de tarefa fornecido não é suportado: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fornece uma descrição fácil do namespace." msgid "Public images do not have members." msgstr "As imagens públicas não têm membros." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "URL pública para usar com terminal de versões. O padrão é Nenhum, que irá " "usar o atributo host_url da solicitação para preencher a base da URL. Se " "Glance estiver operacional por trás de um proxy, você desejará alterá-lo " "para representar a URL do proxy." msgid "Python module path of data access API" msgstr "Caminho do módulo Python do acesso de dados de API" msgid "Received invalid HTTP redirect." msgstr "Redirecionamento de HTTP inválido recebido." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirecionando para %(uri)s para obter autorização." #, python-format msgid "Registry service can't use %s" msgstr "Serviço de registro não pode utilizar %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "O registro não foi configurado corretamente no servidor de API. 
Motivo: " "%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "A relação %(name)s não pode ter diversos valores" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Recarregamento de %(serv)s não suportado" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Recarregando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" msgid "Request body must be a JSON array of operation objects." msgstr "" "O corpo da solicitação deve ser uma matriz JSON de objetos de operação." msgid "Request must be a list of commands" msgstr "Requisição deve ser uma lista de comandos" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "As propriedades de imagem necessárias para a verificação de assinatura não " "existem. Não é possível verificar a assinatura. " #, python-format msgid "Required store %s is invalid" msgstr "O armazenamento necessário %s é inválido" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "Os nomes do tipo de recurso devem estar alinhados aos tipos de recurso do " "Heat sempre que possível: http://docs.openstack.org/developer/heat/" "template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "A resposta de Keystone não contém um terminal de Visão Rápida." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Papel utilizado para identificar um usuário autenticado como administrador." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." 
msgstr "" "Executar como um processo de execução longa. Quando não especificado (o " "padrão), executa a operação de limpeza uma vez e depois sai. Quando " "especificado, não saia e execute a limpeza dentro do intervalo wakeup_time, " "conforme especificado na configuração." msgid "Scope of image accessibility" msgstr "Escopo de acessibilidade de imagem" msgid "Scope of namespace accessibility." msgstr "Escopo da acessibilidade do namespace." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Chave de segredo a ser usada para sinalizar o rastreio de serviços da API " "Glance e de Registro Glance " #, python-format msgid "Server %(serv)s is stopped" msgstr "O servidor %(serv)s foi interrompido" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Falha na criação do trabalhador do servidor: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Configure uma cota ampla do sistema para cada usuário. Esse valor é a " "capacidade total que um usuário pode usar em todos os sistemas de " "armazenamento. Um valor de 0 significa ilimitada. A unidade opcional pode " "ser especificada para o valor. Unidades aceitas são B, KB, MB, GB e TB que " "representa Bytes, Kilobytes, MegaBytes, GigaBytes e TeraBytes " "respectivamente. Se nenhuma unidade for especificada, então Bytes é " "assumido. Observe que não deve haver nenhum espaço entre o valor e unidade e " "unidades fazem distinção entre maiúsculas e minúsculas." 
#, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "Mostrar o nível %(shl)s não é suportado nesta operação" msgid "Signature verification failed" msgstr "A verificação de assinatura falhou" msgid "Signature verification failed." msgstr "A verificação de assinatura falhou." msgid "Size of image file in bytes" msgstr "Tamanho do arquivo da imagem em bytes " msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Alguns tipos de recurso permitem mais de um par de chave/valor por " "instância. Por exemplo, o Cinder permite metadados do usuário e da imagem " "em volumes. Somente os metadados de propriedades da imagem são avaliados " "pelo Nova (planejamento ou drivers). Essa propriedade permite que um destino " "de namespace remova a ambiguidade." msgid "Sort direction supplied was not valid." msgstr "A direção de classificação fornecida não era válida." msgid "Sort key supplied was not valid." msgstr "A chave de classificação fornecida não era válida." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Especifica o prefixo a ser usado para o tipo de recurso determinado. " "Qualquer propriedade no namespace deve ter esse prefixo ao ser aplicada ao " "tipo de recurso especificado. O separador de prefixo deve ser incluído (p. " "ex., dois pontos :)." msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "Especifica qual executor da tarefa a ser utilizado para executar os scripts " "da tarefa." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." 
msgstr "O status deve ser \"pendente\", \"aceito\" ou \"rejeitado\"." msgid "Status not specified" msgstr "Status não especificado" msgid "Status of the image" msgstr "Status da imagem" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Status de transição de %(cur_status)s para %(new_status)s não é permitido" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Parando %(serv)s (pid %(pid)s) com sinal (%(sig)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "Armazenamento de image_id não localizado: %s" #, python-format msgid "Store for scheme %s not found" msgstr "Armazenamento do esquema %s não localizado" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "%(attr)s fornecido (%(supplied)s) e %(attr)s gerado da imagem transferida " "por upload (%(actual)s) não corresponderam. Configurando o status da imagem " "para 'eliminado'." msgid "Supported values for the 'container_format' image attribute" msgstr "Valores suportados para o atributo de imagem 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valores suportados para o atributo de imagem 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Novo spawn suprimido já que %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Sinal SIGHUP do sistema recebido." 
#, python-format msgid "Task '%s' is required" msgstr "Tarefa '%s' é obrigatória" msgid "Task does not exist" msgstr "A tarefa não existe" msgid "Task failed due to Internal Error" msgstr "A tarefa falhou devido a Erro interno" msgid "Task was not configured properly" msgstr "A tarefa não foi configurada adequadamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Tarefa com o ID fornecido %(task_id)s não foi localizada" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "O filtro \"changes-since\" não está mais disponível na v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "O arquivo CA especificado %s não existe" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "O objeto da Imagem %(image_id)s que está sendo criado por esta tarefa " "%(task_id)s não está mais no status válido para processamento adicional." msgid "The Store URI was malformed." msgstr "O URI de Armazenamento foi malformado." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "A URL para o serviço do keystone. Se \"use_user_token\" não estiver em vigor " "e utilizando uma autorização do keystone, então a URL do keystone pode ser " "especificada." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "" "O endereço no qual o serviço de autenticação Swift está atendendo." "(descontinuado)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "A senha do administrador. Se \"use_user_token\" não estiver em vigor, então " "as credenciais do administrador podem ser especificadas." msgid "" "The administrators user name. 
If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "O nome de usuário do administrador. Se \"use_user_token\" não estiver em " "vigor, então as credenciais do administrador podem ser especificadas." msgid "The amount of time in seconds to delay before performing a delete." msgstr "" "A quantidade de tempo em segundos para esperar antes de realizar uma " "exclusão." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "A quantia de tempo para permitir que uma imagem incompleta permaneça no " "cache, antes que a remoção de cache, se estiver em execução, remova a imagem " "incompleta." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "" "O valor de backlog que será utilizado quando se estiver criando o socket de " "escuta TCP." #, python-format msgid "The cert file you specified %s does not exist" msgstr "O arquivo de certificado especificado %s não existe" msgid "The config file that has the swift account(s)configs." msgstr "O arquivo de configuração que possui as configurações da conta swift." msgid "The current status of this task" msgstr "O status atual desta tarefa" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "O dispositivo no qual reside o diretório de cache de imagem " "%(image_cache_dir)s não suporta xattr. É provável que você precise editar " "fstab e incluir a opção user_xattr na linha apropriada do dispositivo que " "contém o diretório de cache." msgid "The driver to use for image cache management." msgstr "O driver a ser utilizado para o gerenciamento de cache de imagens." 
#, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "O formato da versão %s não é válido. Use a notação semver" msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "O formato para o qual as imagens serão convertidas automaticamente. Ao usar " "o backend de RBD, isso deve ser configurado para 'raw'" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "O URI fornecido não é válido. Especifique um uri válido a partir da seguinte " "lista de URI suportados %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" "O nome do host/IP do processo pydev que atende às conexões de depuração" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "A imagem %s já está presente no escravo, mas nossa verificação por ela não a " "encontrou. Isto indica que nós não possuímos permissões para ver todas as " "imagens no servidor escravo." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "O blob de artefato recebido é muito grande: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "A imagem recebida é muito grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "O arquivo-chave especificado %s não existe" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de localizações de imagens permitidas. 
" "Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de membros de imagem permitidos para esta " "imagem. Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de propriedades de imagem permitidas. " "Tentativa: %(attempted)s, Máximo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "O limite foi excedido no número de propriedades de imagem permitidas. " "Tentativa: %(num)s, Máximo: %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "O limite foi excedido no número de tags de imagem permitidas. Tentativa: " "%(attempted)s, Máximo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "O local %(location)s já existe" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Os dados da localização têm um ID inválido: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "O local do arquivo de proteção de propriedade. Esse arquivo contém as regras " "para proteções de propriedade e as funções/políticas associadas a ele. 
Se " "este valor de configuração não for especificado, por padrão, as proteções de " "propriedade não serão impostas. Se um valor for especificado e o arquivo não " "for encontrado, então o serviço glance-api não iniciará." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Definição de metadados %(record_type)s com o nome=%(record_name)s não " "excluída. Outros registros ainda se referem a ela." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "O namespace de definição de metadados=%(namespace_name)s já existe." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "O objeto de definição de metadados com o nome=%(object_name)s não foi " "localizado no namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "A propriedade de definição de metadados com o nome=%(property_name)s não foi " "localizada no namespace=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "A associação do tipo de recurso de definição de metadados do tipo derecurso=" "%(resource_type_name)s ao namespace=%(namespace_name)s já existe." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "A associação do tipo de recurso de definição de metadados do tipo derecurso=" "%(resource_type_name)s ao namespace=%(namespace_name)s, não foi localizada." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." 
msgstr "" "O tipo de recurso de definição de metadados com o nome=" "%(resource_type_name)s, não foi localizado." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "A identificação da definição de metadados com o nome=%(name)s não foi " "localizada no namespace=%(namespace_name)s." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "O modo no qual o mecanismo será executado. Pode ser 'serial' ou 'parallel'." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "O número de trabalhadores de processo-filho que será criado para as " "solicitações de serviço. O padrão será igual ao número de CPUs disponíveis." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "O número de atividades paralelas executadas ao mesmo tempo pelo mecanismo. O " "valor pode ser maior que um quando o modo de mecanismo está 'parallel'." msgid "The parameters required by task, JSON blob" msgstr "Os parâmetros requeridos pela tarefa, blob JSON" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "O caminho para o arquivo de certificado a ser usado em conexões SSL com o " "servidor de registro, se houver. Alternativamente, você pode configurar a " "variável de ambiente GLANCE_CLIENT_CERT_FILE para um caminho de arquivo do " "arquivo de certificado CA" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." 
msgstr "" "O caminho para o arquivo de certificado da autoridade de certificação, a ser " "usado em conexões SSL com o servidor de registro, se houver. " "Alternativamente, você pode configurar a variável de ambiente " "GLANCE_CLIENT_CA_FILE para um caminho do arquivo do certificado CA ." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "O caminho para o arquivo-chave a ser usado em conexões SSL com o servidor de " "registro, se houver. Alternativamente, é possível configurar a variável de " "ambiente GLANCE_CLIENT_KEY_FILE para um caminho de arquivo de chave" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "O caminho para o arquivo de base de dados sqlite que será utilizado para " "gerenciamento do cache de imagens." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "O período de tempo, em segundos que o servidor de API irá esperar por uma " "requisição de registro para completar. Um valor de 0 implica em nenhum tempo " "limite." msgid "The port on which a pydev process is listening for connections." msgstr "A porta na qual um processo pydev está atendendo às conexões." msgid "The port on which the server will listen." msgstr "A porta em que o servidor irá escutar." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "O protocolo para utilizar para comunicação com o servidor de registro. Tanto " "HTTP ou HTTPS." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" "O corpo fornecido %(body)s é inválido sob o esquema especificado: %(schema)s" msgid "The provided image is too large." msgstr "A imagem fornecida é muito grande." 
#, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "O caminho fornecido '%(path)s' é inválido: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "A referência aos parâmetros de armazenamento para conta/auxiliar swift " "padrão usa para incluir novas imagens." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "A região para o serviço de autenticação. Se \"use_user_token\" não estiver " "em vigor e utilizando a autorização do keystone, então o nome da região pode " "ser especificado." msgid "The request returned 500 Internal Server Error." msgstr "A solicitação retornou 500 Erro Interno do Servidor." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "A solicitação retornou 503 Serviço Indisponível. Isso geralmente ocorre em " "sobrecarga de serviço ou outra interrupção temporária." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "A solicitação retornou 302 Várias Opções. Isso geralmente significa que você " "não incluiu um indicador de versão em um URI de solicitação.\n" "\n" "O corpo da resposta retornou:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "A solicitação retornou 413 Entidade de Solicitação Muito Grande. 
Isso " "geralmente significa que a taxa de limitação ou um limite de cota foi " "violado.\n" "\n" "O corpo de resposta:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "A solicitação retornou um status inesperado: %(status)s.\n" "\n" "O corpo de resposta:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "A imagem solicitada foi desativada. O download de dados da imagem é proibido." msgid "The result of current task, JSON blob" msgstr "O resultado da tarefa atual, blob JSON" msgid "The signature data was not properly encoded using base64" msgstr "" "Os dados da assinatura não foram codificados corretamente usando base64." #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "O tamanho dos dados que %(image_size)s irá exceder do limite. %(remaining)s " "bytes restantes." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "O tamanho do conjunto de encadeamentos a ser usado para limpar imagens. O " "padrão é um, o que significa limpeza serial. Qualquer valor acima de um " "indica o número máximo de imagens que podem ser limpas em paralelo." 
#, python-format msgid "The specified member %s could not be found" msgstr "O membro especificado %s não pôde ser localizado" #, python-format msgid "The specified metadata object %s could not be found" msgstr "O objeto de metadados especificado %s não pôde ser localizado" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "A identificação de metadados especificada %s não pôde ser localizada" #, python-format msgid "The specified namespace %s could not be found" msgstr "O namespace especificado %s não pôde ser localizado" #, python-format msgid "The specified property %s could not be found" msgstr "A propriedade especificada %s não pôde ser localizada" #, python-format msgid "The specified resource type %s could not be found " msgstr "O tipo de recurso especificado %s não pôde ser localizado " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "O status de local da imagem excluída só pode ser definido como " "'pending_delete' ou 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "O status de local da imagem excluída só pode ser definido como " "'pending_delete' ou 'deleted'." msgid "The status of this image member" msgstr "O status desse membro da imagem" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Os nomes de armazenamento a ser usados para obter a ordem de preferência de " "armazenamento. O nome deve ser registrado por um dos armazenamentos " "definidos pela opção de configuração de ‘armazenamentos'. 
Essa opção será " "aplicada ao usar a opção 'store_type' como estratégia do local de imagem " "definida pela opção de configuração 'location_strategy'." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "A estratégia a ser utilizada para autenticação. Se \"use_user_token\" não " "estiver em vigor, então a estratégia de autorização pode ser especificada." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "O membro de destino %(member_id)s já está associado à imagem %(image_id)s." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "O nome de locatário do usuário administrativo. Se \"use_user_token\" não " "estiver em vigor, então o nome de locatário do administrador pode ser " "especificado." msgid "The type of task represented by this content" msgstr "O tipo de tarefa representada por este conteúdo" msgid "The unique namespace text." msgstr "O texto do namespace exclusivo." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "O limite superior (o tamanho máximo do cache acumulado, em bytes) além da " "remoção do cache que, se estiver em execução, inicia a limpeza do cache de " "imagens." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "O nome fácil do namespace. Usando pela interface com o usuário, se " "disponível." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "O usuário a ser autenticado com relação ao serviço de autenticação Swift " "(descontinuado)" msgid "" "The value for the socket option TCP_KEEPIDLE. 
This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "O valor para a opção de soquete TCP_KEEPIDLE. Esse é o tempo em segundos que " "a conexão deve ficar inativa antes que o TCP comece a enviar análises keep-" "alive." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "Há um problema com o %(error_key_name)s %(error_filename)s. Verifique-o. " "Erro: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Há um problema com o %(error_key_name)s %(error_filename)s. Verifique-o. " "Erro de OpenSSL: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Há um problema com seu par de chaves. Verifique se o certificado " "%(cert_file)s e a chave %(key_file)s estão juntos. Erro de OpenSSL %(ce)s" msgid "There was an error configuring the client." msgstr "Houve um erro ao configurar o cliente." msgid "There was an error connecting to a server" msgstr "Houve um erro ao conectar a um servidor" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Esse valor de configuração indica se \"funções\" ou \"políticas\" são usados " "no arquivo de proteção de propriedade." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Esta operação não é atualmente permitida em Tarefas do Glance. Elas são " "automaticamente excluídas após atingir o tempo com base em sua propriedade " "expires_at." msgid "This operation is currently not permitted on Glance images details." 
msgstr "" "Esta operação não é atualmente permitida em detalhes de imagens do Glance." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Este valor define qual estratégia será utilizada para determinar a ordem do " "local da imagem. Atualmente, duas estratégias são empacotadas com " "'location_order' e 'store_type' de Glance." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Tempo em horas durante o qual uma tarefa é mantida, com êxito ou falha" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Tempo limite para operações de soquete de conexões do cliente. Se uma " "conexão recebida estiver inativa por esse número de segundos, ela será " "encerrada. Um valor de '0' significa aguardar para sempre." msgid "Too few arguments." msgstr "Muito poucos argumentos." msgid "Too few locations" msgstr "Poucos locais" msgid "Too many locations" msgstr "Muitos locais" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "Tamanho total é %(size)d bytes em %(img_count)d imagens" msgid "Turn on/off delayed delete." msgstr "Liga/desliga a exclusão atrasada." msgid "Type version has to be a valid semver string" msgstr "A versão do tipo precisa ser uma sequência semver válida" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI não pode conter mais de uma ocorrência de um esquema. 
Se você tiver " "especificado um URI como swift://user:pass@http://authurl.com/v1/container/" "obj, precisará alterá-lo para usar o esquema swift+http://, desta forma: " "swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "URL para acessar o arquivo de imagem mantido no armazenamento externo " msgid "Unable to PUT to non-empty collection" msgstr "Não é possível fazer PUT para coleção não vazia" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossível criar arquivo pid %(pid)s. Executando como não raiz?\n" "Voltando para um arquivo temporário, é possível parar o serviço %(service)s " "usando:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Não é possível filtrar por operador desconhecido '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Não é possível filtrar um intervalo com um valor não numérico." msgid "Unable to filter on a unknown operator." msgstr "Não é possível filtrar em um operador desconhecido." msgid "Unable to filter using the specified operator." msgstr "Não é possível filtrar usando o operador especificado." msgid "Unable to filter using the specified range." msgstr "Não é possível filtrar usando o intervalo especificado." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Não é possível localizar '%s' na mudança de Esquema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Não é possível localizar `op` na mudança de Esquema JSON. Deve ser um dos " "seguintes: %(available)s." msgid "Unable to get legacy image" msgstr "Não é possível obter a imagem anterior" msgid "Unable to increase file descriptor limit. 
Running as non-root?" msgstr "" "Não é possível aumentar o limite do descritor de arquivo. Executando como " "não-raiz?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Não é possível carregar %(app_name)s do arquivo de configuração " "%(conf_file)s.\n" "Obtido: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Não é possível carregar o esquema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Impossível localizar o arquivo de configuração de colagem para %s." msgid "Unable to modify collection in immutable or readonly property" msgstr "" "Não é possível modificar a coleção na propriedade imutável ou somente leitura" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "Não é possível recuperar o certificado com ID: %s" msgid "Unable to retrieve request id from context" msgstr "Não é possível recuperar o ID da solicitação do contexto" msgid "Unable to specify artifact type explicitly" msgstr "Não é possível especificar o tipo de artefato explicitamente" msgid "Unable to specify artifact type version explicitly" msgstr "Não é possível especificar a versão do tipo de artefato explicitamente" msgid "Unable to specify version if multiple types are possible" msgstr "Não é possível especificar a versão se vários tipos forem possíveis" msgid "Unable to specify version if type is not specified" msgstr "Não é possível especificar a versão se o tipo não for especificado" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "Não é possível fazer upload de dados de imagem duplicados para a imagem " "%(image_id)s: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "Não é possível verificar assinatura porque o algoritmo não é suportado nesse " "sistema" #, python-format msgid "Unable to verify signature: 
%(reason)s" msgstr "Não é possível verificar assinatura: %(reason)s" msgid "Unauthorized image access" msgstr "Acesso à imagem desautorizado" msgid "Unexpected body type. Expected list/dict." msgstr "Tipo de corpo inesperado. Lista/dicionário esperados." #, python-format msgid "Unexpected response: %s" msgstr "Resposta inesperada: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Estratégia de autenticação desconhecida '%s'" #, python-format msgid "Unknown command: %s" msgstr "Comando desconhecido: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direção de classificação desconhecida; deve ser 'desc' ou 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versão rascunho do Esquema JSON não reconhecida" msgid "Unrecognized changes-since value" msgstr "Valor desde as alterações não reconhecido" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "sort_dir não suportado. Valores aceitáveis: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "sort_key não suportado. 
Valores aceitáveis: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "Valor %(value)d fora da faixa; não deve exceder %(max)d" msgid "Value is greater than maximum" msgstr "O valor é maior que o máximo" msgid "Value is less than minimum" msgstr "O valor é menor que o mínimo" msgid "Value is required" msgstr "O valor é necessário" #, python-format msgid "Version component is too large (%d max)" msgstr "O componente da versão é muito extenso (%d máx.)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "A versão é inválida: %(reason)s" msgid "Virtual size of image in bytes" msgstr "Tamanho virtual de imagem em bytes " msgid "Visibility must be either \"public\" or \"private\"" msgstr "Visibilidade deve ser \"público\" ou \"privado\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Esperou 15 segundos para pid %(pid)s (%(file)s) ser eliminado; desistindo" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "Quando false, nenhum artefato pode ser carregado independentemente de " "available_plugins. Quando true, os artefatos podem ser carregados." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Ao executar o servidor no modo SSL, você deve especificar um valor de opção " "cert_file e key_file no seu arquivo de configuração" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Quando verdadeiro, esta opção seta o dono da image para ser o tenant. Caso " "contrário o dono da imagem será o usuário autenticado realizando a " "requisição." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. 
This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "Ao usar SSL nas conexões com o servidor de registro, não é necessária a " "validação por meio de uma autoridade de certificação. Esse é o equivalente " "do registro de especificação --insecure na linha de comandos usando " "glanceclient para o API." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Se permite que os usuários especifiquem propriedades de imagem além do que o " "esquema de imagem fornece" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "Especifica se os locais da imagem de backend devem ser incluídos nas " "propriedades da imagem. Por exemplo, se usar um armazenamento de sistema de " "arquivos, uma URL de \"file:///path/to/image\" será retornada para o usuário " "no campo de metadados 'direct_url'. A revelação do local de armazenamento " "pode representar um risco à segurança, portanto, use essa configuração com " "cuidado! Configurar para true substitui a opção show_image_direct_url." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "Se inclui o local de armazenamento de imagem de backend nas propriedades de " "imagem. A revelação do local de armazenamento pode ser um risco de " "segurança, por isso, use essa configuração com cuidado!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. 
This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Decida se deve passar por cabeçalhos contendo informações de usuário e de " "locatário ao fazer solicitações para o registro. Isso permite que o registro " "use o middleware de contexto sem o auth_token middleware keystonemiddleware, " "removendo chamadas para o serviço de autenticação keystone. É recomendável " "que, ao usar essa opção, uma comunicação segura entre a api de glance e o " "registro de glance seja assegurada por um meio diferente de auth_token " "middleware." msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Se passar pelo token do usuário ao fazer solicitações ao registro. Para " "evitar falhas com expiração de token durante o upload de arquivos grandes, é " "recomendável configurar esse parâmetro como False. Se \"use_user_token\" não " "estiver em vigor, as credenciais do administrador poderão ser especificadas." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. 
Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "O diretório de trabalho para operações de tarefa assíncrona. O diretório " "definido aqui será usado para operar sobre imagens – normalmente antes de " "serem importados no armazenamento de destino. Ao fornecer o diretório de " "trabalho, certifique-se de que espaço suficiente seja fornecido para tarefas " "simultâneas para executar de forma eficiente sem execução fora do espaço. " "Uma estimativa aproximada pode ser feita multiplicando o número de " "`max_workers` – ou o N de trabalhadores em execução – por um tamanho médio " "de imagem (por exemplo, 500 MB). A estimativa de tamanho da imagem deve ser " "feita com base no tamanho médio em sua implementação. Observe que, " "dependendo das tarefas em execução pode ser necessário multiplicar este " "número por algum fator, dependendo do que a tarefa faz. Por exemplo, você " "pode desejar duplicar o tamanho disponível se a conversão de imagem estiver " "ativada. Com tudo isto que está sendo dito, lembre-se que estas são apenas " "estimativas e é necessário fazê-las com base no pior cenário e estar " "preparado para agir em caso deles estarem errados." #, python-format msgid "Wrong command structure: %s" msgstr "Estrutura de comandos incorreta: %s" msgid "You are not authenticated." msgstr "Você não está autenticado." msgid "You are not authorized to complete this action." msgstr "Você não está autorizado a concluir esta ação." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Você não está autorizado a consultar a imagem %s." 
#, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Você não está autorizado a consultar os membros da imagem %s." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "Você não tem permissão para criar uma identificação no namespace de " "propriedade de '%s'" msgid "You are not permitted to create image members for the image." msgstr "Você não tem permissão para criar membros da imagem." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Você não tem permissão para criar imagens de propriedade de '%s'." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "Você não tem permissão para criar namespace de propriedade de '%s'" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "Você não tem permissão para criar objeto de propriedade de '%s'" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "" "Você não tem permissão para criar essa propriedade de propriedade de '%s'" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "Você não tem permissão para criar resource_type de propriedade de '%s'" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "" "Você não tem permissão para criar essa tarefa com proprietário como: %s" msgid "You are not permitted to deactivate this image." msgstr "Você não tem permissão para desativar esta imagem." msgid "You are not permitted to delete this image." msgstr "Você não tem permissão para excluir esta imagem." msgid "You are not permitted to delete this meta_resource_type." msgstr "Você não tem permissão para excluir esse meta_resource_type." msgid "You are not permitted to delete this namespace." msgstr "Você não tem permissão para excluir esse namespace." msgid "You are not permitted to delete this object." 
msgstr "Você não tem permissão para excluir esse objeto." msgid "You are not permitted to delete this property." msgstr "Você não tem permissão para excluir essa propriedade." msgid "You are not permitted to delete this tag." msgstr "Você não tem permissão para excluir esta identificação." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "Você não ter permissão para modificar '%(attr)s' nesse %(resource)s." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Você não tem permissão para modificar '%s' nesta imagem." msgid "You are not permitted to modify locations for this image." msgstr "Você não tem permissão para modificar locais para esta imagem." msgid "You are not permitted to modify tags on this image." msgstr "Você não tem permissão para modificar tags nesta imagem." msgid "You are not permitted to modify this image." msgstr "Você não tem permissão para modificar esta imagem." msgid "You are not permitted to reactivate this image." msgstr "Você não tem permissão para reativar essa imagem." msgid "You are not permitted to set status on this task." msgstr "Você não tem permissão para definir o status dessa tarefa." msgid "You are not permitted to update this namespace." msgstr "Você não tem permissão para editar esse namespace." msgid "You are not permitted to update this object." msgstr "Você não tem permissão para atualizar esse objeto." msgid "You are not permitted to update this property." msgstr "Você não tem permissão para atualizar essa propriedade." msgid "You are not permitted to update this tag." msgstr "Você não tem permissão para atualizar esta identificação." msgid "You are not permitted to upload data for this image." msgstr "Você não tem permissão para fazer upload de dados para esta imagem." 
#, python-format msgid "You cannot add image member for %s" msgstr "Não é possível incluir o membro da imagem para %s" #, python-format msgid "You cannot delete image member for %s" msgstr "Não é possível excluir o membro da imagem para %s" #, python-format msgid "You cannot get image member for %s" msgstr "Não é possível obter o membro da imagem para %s" #, python-format msgid "You cannot update image member %s" msgstr "Não é possível atualizar o membro da imagem %s" msgid "You do not own this image" msgstr "Você não possui essa imagem" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Você optou por usar SSL na conexão e forneceu um certificado, mas falhou em " "fornecer um parâmetro key_file ou configurar a variável de ambiente " "GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Você optou por usar SSL na conexão e forneceu uma chave, mas falhou em " "fornecer um parâmetro cert_file ou configurar a variável de ambiente " "GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() obteve argumento de palavra-chave inesperado '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Não é possível a transição de %(current)s para %(next)s na atualização " "(desejado from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" 
"conflito de propriedades customizadas (%(props)s) com propriedades de base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "nem o hub 'poll' nem o 'selects' do eventlet estão disponíveis nesta " "plataforma" msgid "is_public must be None, True, or False" msgstr "is_public deve ser Nenhum, True ou False" msgid "limit param must be an integer" msgstr "o parâmetro limit deve ser um número inteiro" msgid "limit param must be positive" msgstr "o parâmetro limit deve ser positivo" #, python-format msgid "location: %s data lost" msgstr "local: %s dados perdidos" msgid "md5 hash of image contents." msgstr "Hash md5 do conteúdo da imagem." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() obteve palavras-chave inesperadas %s" msgid "protected must be True, or False" msgstr "protegido deve ser True, ou False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "Não é possível ativar %(serv)s. Obteve erro: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id é muito longo; tamanho máximo %s" glance-12.0.0/glance/locale/it/0000775000567000056710000000000012701407204017265 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/it/LC_MESSAGES/0000775000567000056710000000000012701407204021052 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/it/LC_MESSAGES/glance.po0000664000567000056710000034651712701407051022663 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Tom Cocozzello , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev4\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-21 10:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 06:27+0000\n" "Last-Translator: Alessandra \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s deve essere una stringa" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s è necessario" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s non può essere più lungo di %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s non può essere più corto di %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s deve corrispondere al pattern %(pattern)s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Eccezione %(cls)s generata nell'ultima chiamata rpc: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s non trovato nell'elenco di membri dell'immagine %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) in esecuzione..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s sembra essere già in esecuzione: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s è registrato come modulo due volte. %(module)s non viene del " "provider." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "" "%(task_id)s di %(task_type)s non configurato correttamente. Impossibile " "caricare l'archivio filesystem" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s di %(task_type)s non configurato correttamente. Directory di " "lavoro mancante: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(verb)s %(serv)s con %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Specificare una coppia host:port in cui host è un indirizzo IPv4, un " "indirizzo IPv6 nome host o FQDN. Se si utilizza un indirizzo IPv6 " "racchiuderlo in parentesi separatamente dalla porta (ad esempio, \"[fe80::a:" "b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s non può contenere 4 byte di caratteri unicode." #, python-format msgid "%s is already stopped" msgstr "%s è già stato arrestato" #, python-format msgid "%s is stopped" msgstr "%s è stato arrestato" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "Valore '%(param)s' fuori dall'intervallo, non deve superare %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "l'opzione --os_auth_url o la variabile d'ambiente OS_AUTH_URL sono " "obbligatori quando è abilitato il modo di autenticazione keystone\n" msgid "A body is not expected with this request." msgstr "Un corpo non è previsto con questa richiesta." msgid "" "A list of artifacts that are allowed in the format name or name-version. 
" "Empty list means that any artifact can be loaded." msgstr "" "Un elenco di risorse utente consentite nel formato nome o nome-versione. Un " "elenco vuoto indica che non può essere caricata nessuna risorsa utente." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Un oggetto della definizione di metadati con nome=%(object_name)s già " "esiste nello nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Una proprietà della definizione di metadati con nome=%(property_name)s già " "esiste nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Un tipo-risorsa della definizione di metadati con nome=" "%(resource_type_name)s già esiste." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Un tag di metadata denominato=%(name)s esiste già nello nello spazio dei " "nomi=%(namespace_name)s." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Un insieme di URL per accedere al file di immagini conservato nell'archivio " "esterno" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "Chiave AES per la codifica dei metadati 'location' dell'archivio. Include, " "se utilizzate, le credenziali Swift o S3. Deve essere impostata su una " "stringa causale di lunghezza pari a 16, 24 o 32 byte" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Indirizzo per il bind del server. Utile quando si seleziona una particolare " "interfaccia di rete." 
msgid "Address to find the registry server." msgstr "Indirizzo per trovare il server di registro." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Consenti agli utenti non autenticati di accedere all'API con dei privilegi " "di sola lettura. Ciò si applica solo quando si utilizza ContextMiddleware." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "" "I valori consentiti %s non sono validi nei programmi di convalida forniti" msgid "Amount of disk space (in GB) required to boot image." msgstr "Quantità di spazio su disco (in GB) richiesto per l'immagine di avvio." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantità di ram (in MB) richiesta per l'immagine di avvio." msgid "An identifier for the image" msgstr "Un identificativo per l'immagine" msgid "An identifier for the image member (tenantId)" msgstr "Un identificativo per il membro dell'immagine (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificativo del proprietario di questa attività" msgid "An identifier for the task" msgstr "Un identificativo per l'attività" msgid "An image file url" msgstr "Un URL al file di immagini" msgid "An image schema url" msgstr "Un URL allo schema di immagini" msgid "An image self url" msgstr "Un URL personale all'immagine" #, python-format msgid "An image with identifier %s already exists" msgstr "Un'immagine con identificativo %s già esiste" msgid "An import task exception occurred" msgstr "Si è verificata un'eccezione attività di importazione" msgid "An object with the same identifier already exists." msgstr "Già esiste un oggetto con lo stesso identificativo." msgid "An object with the same identifier is currently being operated on." msgstr "Un oggetto con lo stesso identificativo è attualmente in uso." msgid "An object with the specified identifier was not found." 
msgstr "Impossibile trovare un oggetto con l'identificativo specificato." msgid "An unknown exception occurred" msgstr "Si è verificata un'eccezione sconosciuta" msgid "An unknown task exception occurred" msgstr "Si è verificata un'eccezione attività sconosciuta" #, python-format msgid "Array has no element at position %d" msgstr "L'array non ha elementi alla posizione %d " msgid "Array property can't have item_type=Array" msgstr "La proprietà dell'array non può avere item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "La risorsa %s non può essere eliminata perché è in uso: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "" "Impossibile modificare lo stato della risorsa utente da %(source)s a " "%(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "La risorsa supera la quota di memoria: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "La risorsa utente non ha proprietà %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "" "Impossibile modificare lo stato della risorsa utente da %(curr)s a %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "Il supporto di memoria della risorsa è pieno: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" "Il tipo di risorsa utente con nome '%(name)s' e versione '%(version)s' non " "è noto" msgid "Artifact with a circular dependency can not be created" msgstr "Impossibile creare una risorsa con una dipendenza circolare" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "Risorsa utente con id=%(id)s non accessibile" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "Risorsa utente con id=%(id)s non trovata" msgid "Artifact with the specified type, name and version already exists" msgstr "La risorsa utente 
con il tipo specificato, nome e versione già esiste" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "La risorsa utente con il tipo specificato, nome e versione già dispone di " "una dipendenza diretta=%(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "La risorsa utente con il tipo specificato, nome e versione già dispone di " "una dipendenza transitiva=%(dep)s" msgid "Attempt to set readonly property" msgstr "Tentativo di impostare la proprietà in sola lettura" msgid "Attempt to set value of immutable property" msgstr "Tentativo di impostare il valore della proprietà immutabile" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Tentativo di caricare un duplicato di immagine: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Si è tentato di aggiornare il campo Ubicazione per un'immagine che non si " "trova nello stato accodato." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Attributo '%(property)s' è di sola lettura." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "L'attributo '%(property)s' è riservato." #, python-format msgid "Attribute '%s' is read-only." msgstr "Attributo '%s' è di sola lettura." #, python-format msgid "Attribute '%s' is reserved." msgstr "L'attributo '%s' è riservato." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "L'attributo container_format può essere sostituito solo per un'immagine " "nella coda." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "L'attributo disk_format può essere sostituito solo per un'immagine nella " "coda." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. 
(deprecated)" msgstr "" "Chiave auth per l'utente che si sta autenticando sul servizio di " "autenticazione servizio. (obsoleto)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Servizio di autenticazione all'URL %(url)s non trovato." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Errore di autenticazione - il token potrebbe essere scaduto durante il " "caricamento del file. Eliminazione dei dati dell'immagine per %s." msgid "Authorization failed." msgstr "Autorizzazione non riuscita." msgid "Available categories:" msgstr "Categorie disponibili:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Formato filtro di query \"%s\" errato. Utilizzare la notazione ISO 8601 " "DateTime." #, python-format msgid "Bad Command: %s" msgstr "Comando non corretto: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Intestazione non valida: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Il valore non valido fornito al filtro %(filter)s ha riportato %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "URI S3 formato in modo non corretto: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Credenziali con formato non corretto %(creds)s' nell'URI Swift" msgid "Badly formed credentials in Swift URI." msgstr "Credenziali formate in modo non corretto nell'URI Swift." msgid "Base directory that the image cache uses." msgstr "La directory di base che la cache dell'immagine utilizza." 
msgid "BinaryObject property cannot be declared mutable" msgstr "La proprietà BinaryObject non può essere dichiarata mutevole" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Blob %(name)s non può avere più valori" msgid "Blob size is not set" msgstr "Dimensione blob non impostata" msgid "Body expected in request." msgstr "Corpo previsto nella richiesta." msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "" "Non è possibile specificare contemporaneamente entrambi file e " "legacy_image_id" msgid "CA certificate file to use to verify connecting clients." msgstr "" "File certificato CA da utilizzare per verificare la connessione dei client." msgid "Cannot be a negative value" msgstr "Non può essere un valore negativo" msgid "Cannot be a negative value." msgstr "Non può essere un valore negativo." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Impossibile convertire %(key)s dell'immagine '%(value)s' in un numero intero." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "Impossibile dichiarare la proprietà della risorsa utente con il nome " "riservato 'metadata'" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "Impossibile caricare la risorsa utente '%(name)s'" msgid "Cannot remove last location in the image." msgstr "Impossibile rimuovere l'ultima ubicazione nell'immagine." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "Impossibile salvare i dati per l'immagine %(image_id)s: %(error)s" msgid "Cannot set locations to empty list." msgstr "Impossibile impostare le ubicazione nell'elenco vuoto." 
msgid "Cannot specify 'max_size' explicitly" msgstr "Impossibile specificare 'max_size' esplicitamente" msgid "Cannot specify 'min_size' explicitly" msgstr "Impossibile specificare 'min_size' esplicitamente" msgid "Cannot upload to an unqueued image" msgstr "Impossibile caricare un'immagine non accodata" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "Impossibile utilizzare questo parametro con l'operatore %(op)s" msgid "Certificate file to use when starting API server securely." msgstr "" "File certificato da utilizzare quando si avvia il server API in modo sicuro." #, python-format msgid "Certificate format not supported: %s" msgstr "Formato certificato non supportato: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "Il certificato non è valido dopo: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "Il certificato non è valido prima di: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Verifica checksum non riuscita. È stata interrotta la memorizzazione nella " "cache dell'immagine '%s'." msgid "Client disconnected before sending all data to backend" msgstr "Client disconnesso prima di inviare tutti i dati a backend" msgid "Command not found" msgstr "Comando non trovato" msgid "Configuration option was not valid" msgstr "Opzione di configurazione non valida" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Connetti richiesta/non corretta o in errore per il servizio di " "autenticazione all'URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL costruita: %s" msgid "Container format is not specified." msgstr "Formato contenitore non specificato. 
" msgid "Content-Type must be application/octet-stream" msgstr "Tipo-contenuto deve essere application/octet-stream" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "" "Esecuzione del download immagine danneggiato per l'immagine %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Impossibile collegarsi a %(host)s:%(port)s dopo aver tentato per 30 secondi" msgid "Could not find OVF file in OVA archive file." msgstr "Impossibile trovare il file OVD nel file di archivio OVA." #, python-format msgid "Could not find metadata object %s" msgstr "Impossibile trovare l'oggetto di metadati %s" #, python-format msgid "Could not find metadata tag %s" msgstr "Impossibile trovare il tag di metadati %s" #, python-format msgid "Could not find namespace %s" msgstr "Impossibile trovare lo spazio dei nomi %s" #, python-format msgid "Could not find property %s" msgstr "Impossibile trovare la proprietà %s" msgid "Could not find required configuration option" msgstr "Impossibile trovare l'opzione di configurazione richiesta" #, python-format msgid "Could not find task %s" msgstr "Impossibile trovare l'attività %s" #, python-format msgid "Could not update image: %s" msgstr "Impossibile aggiornare l'immagine: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Attualmente, i pacchetti OVA che contengono più dischi non sono supportati." msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "L'elenco di programmi di convalida personalizzati contiene tuple '(funzione, " "messaggio)'" #, python-format msgid "Data for image_id not found: %s" msgstr "Dati per image_id non trovati: %s" msgid "Data supplied was not valid." msgstr "I dati forniti non erano validi." 
msgid "Date and time of image member creation" msgstr "Data e ora di creazione del membro dell'immagine" msgid "Date and time of image registration" msgstr "Data e ora della registrazione dell'immagine" msgid "Date and time of last modification of image member" msgstr "Data e ora dell'ultima modifica del membro dell'immagine" msgid "Date and time of namespace creation" msgstr "Data ed ora della creazione dello spazio dei nomi" msgid "Date and time of object creation" msgstr "Data ed ora della creazione dell'oggetto" msgid "Date and time of resource type association" msgstr "Data ed ora dell'associazione del tipo di risorsa" msgid "Date and time of tag creation" msgstr "Data ed ora della creazione del tag" msgid "Date and time of the last image modification" msgstr "Data e ora dell'ultima modifica dell'immagine" msgid "Date and time of the last namespace modification" msgstr "Data ed ora dell'ultima modifica allo spazio dei nomi" msgid "Date and time of the last object modification" msgstr "Data ed ora dell'ultima modifica all'oggetto" msgid "Date and time of the last resource type association modification" msgstr "Data ed ora dell'ultima modifica all'associazione del tipo di risorsa" msgid "Date and time of the last tag modification" msgstr "Data ed ora dell'ultima modifica al tag" msgid "Datetime when this resource was created" msgstr "Data e ora in cui questa risorsa è stata creata" msgid "Datetime when this resource was updated" msgstr "Data e ora in cui questa risorsa è stata aggiornata" msgid "Datetime when this resource would be subject to removal" msgstr "Data e ora in cui questa risorsa verrà rimossa" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Il valore predefinito per il numero di elementi restituiti da una richiesta " "se non specificato esplicitamente nella richiesta" msgid "Default value is invalid" msgstr "Il valore predefinito non è valido" #, python-format msgid 
"Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "" "Rifiutato il tentativo di caricare la risorsa perché supera la quota: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Rifiutato il tentativo di caricare l'immagine perché supera la quota: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "Divieto del tentativo di caricare un'immagine più grande di %d byte." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "Prima deve essere eliminata la proprietà dipendenza '%s'" msgid "Dependency relations cannot be mutable" msgstr "Le relazioni di dipendenza non possono essere mutevoli" msgid "Deploy the v1 OpenStack Images API." msgstr "Distribuire l'API immagini OpenStack v1." msgid "Deploy the v1 OpenStack Registry API." msgstr "Distribuire l'API del registro OpenStack v1." msgid "Deploy the v2 OpenStack Images API." msgstr "Distribuire l'API immagini OpenStack v2." msgid "Deploy the v2 OpenStack Registry API." msgstr "Distribuire l'API del registro OpenStack v2." msgid "Descriptive name for the image" msgstr "Nome descrittivo per l'immagine" msgid "Dictionary contains unexpected key(s)" msgstr "Il dizionario contiene chiavi impreviste" msgid "Dictionary size is greater than maximum" msgstr "La dimensione del dizionario è superiore a quella massima" msgid "Dictionary size is less than minimum" msgstr "La dimensione del dizionario è inferiore a quella minima" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "L'algoritmo digest che verrà utilizzato per la firma digitale. 
Utilizzare il " "comando \"openssl list-message-digest-algorithms\" per richiamare gli " "algoritmi disponibili supportati dalla versione di OpenSSL sulla " "piattaforma. Esempi sono \"sha1\", \"sha256\", \"sha512\" e così via." msgid "Disk format is not specified." msgstr "Formato disco non specificato. " msgid "Does not match pattern" msgstr "Non corrisponde al pattern" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Impossibile configurare il driver %(driver_name)s correttamente. Motivo: " "%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "È necessario specificare un file o un legacy_image_id." msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Errore di decodifica della richiesta. L'URL o il corpo della richiesta " "contengono caratteri che non possono essere decodificati da Glance" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Errore durante il recupero dei membri immagine %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Errore nella configurazione dell'archivio. L'aggiunta di risorse a questo " "archivio non è consentita." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Errore nella configurazione dell'archivio. L'aggiunta di immagini a questo " "archivio non è consentita." 
msgid "Error occurred while creating the verifier" msgstr "Si è verificato un errore durante la creazione del verificatore" msgid "Error occurred while verifying the signature" msgstr "Si è verificato un errore durante la verifica della firma" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Previsto un membro nel formato: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Previsto uno stato nel formato: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "L'origine esterna non deve essere vuota" #, python-format msgid "External sources are not supported: '%s'" msgstr "Le origini esterne non sono supportate: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Attivazione immagine non riuscita. Ricevuto errore: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Impossibile aggiungere metadati all'immagine. Ricevuto errore: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Impossibile trovare la risorsa %(artifact_id)s da eliminare" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Impossibile trovare l'immagine %(image_id)s da eliminare" #, python-format msgid "Failed to find image to delete: %s" msgstr "Impossibile trovare l'immagine da eliminare: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Impossibile trovare l'immagine da aggiornare: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Impossibile trovare il tipo di risorsa %(resourcetype)s da eliminare" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Impossibile inizializzare il database cache immagini. Errore ricevuto: %s" #, python-format msgid "Failed to read %s from config" msgstr "Impossibile leggere %s dalla configurazione" #, python-format msgid "Failed to reserve image. 
Got error: %s" msgstr "Impossibile prenotare l'immagine. Errore: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "Impossibile aggiornare i metadati immagine. Errore: %s" #, python-format msgid "Failed to upload image %s" msgstr "Caricamento immagine %s non riuscito" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " "HTTP: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Impossibile caricare i dati dell'immagine %(image_id)s a causa di un errore " "interno: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Il file %(path)s ha un file di backup %(bfile)s non valido, operazione " "interrotta." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Le importazioni basata su file non sono consentite. Utilizzare un'origine " "dati dell'immagine non locale." msgid "File too large" msgstr "File troppo grande" msgid "File too small" msgstr "File troppo piccolo" msgid "Forbidden image access" msgstr "Accesso all'immagine vietato" #, python-format msgid "Forbidden to delete a %s image." msgstr "Divieto di eliminare un'immagine %s." #, python-format msgid "Forbidden to delete image: %s" msgstr "Divieto di eliminare l'immagine: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Divieto di modificare '%(key)s' dell'immagine %(status)s." #, python-format msgid "Forbidden to modify '%s' of image." msgstr "Divieto di modificare '%s' dell'immagine." msgid "Forbidden to reserve image." msgstr "Vietato prenotare l'immagine." msgid "Forbidden to update deleted image." msgstr "Divieto di aggiornare l'immagine eliminata." 
#, python-format msgid "Forbidden to update image: %s" msgstr "Divieto di aggiornare l'immagine: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Vietato tentativo di caricamento: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Richiesta vietata, lo spazio dei nomi della definizione di metadati =%s non " "è visibile." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Richiesta vietata, l'attività %s non è visibile" msgid "Format of the container" msgstr "Formato del contenitore" msgid "Format of the disk" msgstr "Formato del disco" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "Richiamo dati blob %(name)s non riuscito: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Acquisizione dati immagine %(id)s non riuscita: %(err)s." msgid "Glance client not installed" msgstr "Client Glance non installato" #, python-format msgid "Host \"%s\" is not valid." msgstr "L'host \"%s\" non è valido." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host o porta \"%s\" non è valido." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "I messaggi informativi leggibili dall'utente sono inclusi solo se necessario " "(di solito in caso di errore)" msgid "If False doesn't trace SQL requests." msgstr "Se impostato su False non tiene traccia delle richieste SQL." msgid "If False fully disable profiling feature." msgstr "" "Se impostato su False disabilita completamente la funzione di creazione di " "profili." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. 
In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Se False, il server restituirà l'intestazione \"Connection: close\", se " "True, il server restituirà \"Connection: Keep-Alive\" nelle proprie " "risposte. Per chiudere la connessione socket del client in modo esplicito " "dopo che la risposta è stata inviata e letta correttamente dal client, è " "necessario semplicemente impostare questa opzione su False durante la " "creazione di un server wsgi." msgid "If true, image will not be deletable." msgstr "Se true, l'immagine non sarà eliminabile." msgid "If true, namespace will not be deletable." msgstr "Se impostato su true, lo spazio dei nomi non sarà eliminabile." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "L'immagine %(id)s non può essere eliminata perché è in uso: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "Immagine %(id)s non trovata" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Impossibile trovare l'immagine %(image_id)s dopo il caricamento. L'immagine " "potrebbe essere stata eliminata durante il caricamento: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "L'immagine %(image_id)s è protetta e non può essere eliminata." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Impossibile trovare l'immagine %s dopo il caricamento. L'immagine potrebbe " "essere stata eliminata durante il caricamento. Eliminazione delle porzioni " "caricate." #, python-format msgid "" "Image %s could not be found after upload. 
The image may have been deleted " "during the upload." msgstr "" "Impossibile trovare l'immagine %s dopo il caricamento. L'immagine potrebbe " "essere stata eliminata durante il caricamento." #, python-format msgid "Image %s is deactivated" msgstr "L'immagine %s è disattivata" #, python-format msgid "Image %s is not active" msgstr "L'immagine %s non è attiva" #, python-format msgid "Image %s not found." msgstr "Immagine %s non trovata." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "L'immagine supera la quota di memoria: %s" msgid "Image id is required." msgstr "ID immagine obbligatorio." msgid "Image is protected" msgstr "L'immagine è protetta" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" "Superato il limite del membro dell'immagine per l'immagine %(id)s: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "Il nome dell'immagine è troppo lungo: %d" msgid "Image operation conflicts" msgstr "L'operazione dell'immagine è in conflitto" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Il passaggio di stato dell'immagine da %(cur_status)s a %(new_status)s non è " "consentito" #, python-format msgid "Image storage media is full: %s" msgstr "Il supporto di memorizzazione dell'immagine è pieno: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Superato il limite di tag dell'immagine per l'immagine %(id)s: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problemi nel caricamento dell'immagine: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "Immagine con identificativo %s già esiste!" #, python-format msgid "Image with identifier %s has been deleted." msgstr "L'immagine con identificativo %s è stata eliminata." 
#, python-format msgid "Image with identifier %s not found" msgstr "Impossibile trovare l'immagine con identificativo %s" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "L'immagine con l'id fornito %(image_id)s non è stata trovata" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Strategia di autenticazione errata, previsto \"%(expected)s\" ma ricevuto " "\"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Richiesta non corretta: %s" msgid "Index is out of range" msgstr "L'indice non è compreso nell'intervallo" msgid "Index is required" msgstr "L'indice è obbligatorio " #, python-format msgid "Input does not contain '%(key)s' field" msgstr "L'input non contiene il campo '%(key)s'" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "Autorizzazioni insufficienti sul supporto di memoria risorsa: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "" "Autorizzazioni insufficienti sul supporto di memorizzazione immagini: %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Tipo di contenuto non valido per il lavoro con %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Puntatore JSON non valido per questa risorsa: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Formato certificato non valido: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "Checksum non valido '%s': non può superare 32 caratteri " msgid "Invalid configuration in glance-swift conf file." msgstr "Configurazione nel file di configurazione glance-swift non valida." msgid "Invalid configuration in property protection file." msgstr "Configurazione non valida nel file di protezione della proprietà." #, python-format msgid "Invalid container format '%s' for image." 
msgstr "Formato del contenitore '%s' non valido per l'immagine." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Tipo contenuto non valido %(content_type)s" msgid "Invalid dict property type" msgstr "Tipo di proprietà del dizionario non valido" msgid "Invalid dict property type specification" msgstr "Specifica del tipo di proprietà del dizionario non valida" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Formato del disco '%s' non valido per l'immagine." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valore filtro non valido %s. Le virgolette non sono chiuse." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valore filtro non valido %s. Non è presente una virgola prima delle " "virgolette di chiusura." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valore filtro non valido %s. Non è presente una virgola prima delle " "virgolette di apertura." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "Intestazioni \"Content-Type\" non valide: %s" msgid "Invalid image id format" msgstr "Formato ID immagine non valido" msgid "Invalid item type specification" msgstr "Specifica del tipo di elemento non valida" #, python-format msgid "Invalid json body: %s" msgstr "Corpo JSON non valido: %s" msgid "Invalid jsonpatch request" msgstr "Richiesta jsonpatch non valida" msgid "Invalid location" msgstr "Ubicazione non valida" #, python-format msgid "Invalid location %s" msgstr "Ubicazione non valida %s" #, python-format msgid "Invalid location: %s" msgstr "Ubicazione non valida: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Opzione location_strategy non valida: %(name)s. 
Le opzioni strategia valide " "sono: %(strategies)s" msgid "Invalid locations" msgstr "Ubicazioni non valide" #, python-format msgid "Invalid locations: %s" msgstr "Ubicazioni non valide: %s" msgid "Invalid marker format" msgstr "Formato indicatore non valido" msgid "Invalid marker. Image could not be found." msgstr "Indicatore non valido. Impossibile trovare l'immagine." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "mask_gen_algorithm non valido: %s" #, python-format msgid "Invalid membership association: %s" msgstr "Associazione di appartenenza non valida: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Combinazione di formati di disco e contenitore non valida. Quando si imposta " "un formato disco o contenitore in uno dei seguenti 'aki', 'ari' o 'ami', i " "formati contenitore e disco devono corrispondere." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Operazione non valida: `%(op)s`. Deve essere uno dei seguenti: %(available)s." msgid "Invalid position for adding a location." msgstr "Posizione non valida per l'aggiunta di una ubicazione." msgid "Invalid position for removing a location." msgstr "Posizione non valida per la rimozione di una ubicazione." 
msgid "Invalid property definition" msgstr "Definizione della proprietà non valida" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "pss_salt_length non valido: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Tipo di chiave pubblica non valida per il tipo di chiave di firma: %s" msgid "Invalid reference list specification" msgstr "Specifica elenco di riferimento non valida" msgid "Invalid referenced type" msgstr "Tipo indicato non valido" msgid "Invalid request PATCH for work with blob" msgstr "PATCH richiesta non valido per il lavoro con blob" msgid "Invalid service catalog json." msgstr "json del catalogo del servizio non è valido." #, python-format msgid "Invalid signature hash method: %s" msgstr "Metodo hash della firma non valido: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Tipo di chiave di firma non valida: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Direzione ordinamento non valida: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Chiave di ordinamento non valida: %(sort_key)s. Se la versione tipo non è " "impostata, deve essere una delle seguenti: %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Chiave di ordinamento non valida: %(sort_key)s. Deve essere una delle " "seguenti: %(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Chiave di ordinamento non valida: %(sort_key)s. Non è possibile ordinare in " "base a questa proprietà " #, python-format msgid "Invalid status value: %s" msgstr "Valore di stato non valido: %s" #, python-format msgid "Invalid status: %s" msgstr "Stato non valido: %s" #, python-format msgid "Invalid time format for %s." msgstr "Formato ora non valido per %s." 
msgid "Invalid type definition" msgstr "Definizione del tipo non valida" #, python-format msgid "Invalid type value: %s" msgstr "Valore di tipo non valido: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Aggiornamento non valido. Potrebbe generare uno spazio dei nomi della " "definizione di metadati duplicato con lo stesso nome di %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare un oggetto della definizione di " "metadati duplicato con lo stesso nome=%(name)s nello spazio dei " "nomi=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare un oggetto della definizione di " "metadati duplicato con lo stesso nome=%(name)s nello spazio dei " "nomi=%(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Aggiornamento non valido. Potrebbe generare una proprietà della definizione " "di metadati duplicata con lo stesso nome=%(name)s nello spazio dei " "nomi=%(namespace_name)s." 
#, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "Valore '%(value)s' non valido per il parametro '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valore non valido per l'opzione %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valore visibilità non valido: %s" msgid "Is not allowed value" msgstr "Non è un valore consentito" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Sembra che il modulo eventlet sia stato importato prima dell'impostazione di " "%s='yes'. Attualmente è necessario disabilitare eventlet.greendns se si " "utilizza ipv6, poiché eventlet.greendns non funziona correttamente con gli " "indirizzi ipv6. Verificare che eventlet non sia impostato prima effettuare " "di questa impostazione." msgid "It's invalid to provide multiple image sources." msgstr "Non è valido per fornire più origini delle immagini." msgid "It's not allowed to add locations if locations are invisible." msgstr "" "Non è consentito aggiungere ubicazione se le ubicazioni sono invisibili." msgid "It's not allowed to remove locations if locations are invisible." msgstr "" "Non è consentito rimuovere ubicazioni se le ubicazioni sono invisibili." msgid "It's not allowed to update locations if locations are invisible." msgstr "Non è consentito caricare ubicazioni se le ubicazioni sono invisibili." msgid "Items have to be unique" msgstr "Gli elementi devono essere univoci" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "" "Il percorso Json deve iniziare con un '/', terminare con nessun '/', non " "sono consentiti 2 '/' successivi." msgid "Legacy image was not found" msgstr "Immagine legacy non trovata" msgid "Length is greater than maximum" msgstr "La lunghezza è superiore a quella massima" msgid "Length is less than minimum" msgstr "La lunghezza è inferiore al minimo" msgid "Limit param must be an integer" msgstr "Il parametro limite deve essere un numero intero" msgid "Limit param must be positive" msgstr "Il parametro limite deve essere un numero positivo" #, python-format msgid "Limit param must not be higher than %d" msgstr "Il parametro limite non deve essere maggiore di %d" msgid "Limits request ID length." msgstr "Limita la lunghezza dell'ID richiesta. " msgid "List definitions may hot have defaults" msgstr "Le definizioni dell'elenco non possono contenere valori predefiniti" msgid "List of strings related to the image" msgstr "Elenco di stringhe relative all'immagine" msgid "List size is greater than maximum" msgstr "La dimensione dell'elenco è superiore a quella massima" msgid "List size is less than minimum" msgstr "La dimensione dell'elenco è inferiore a quella minima" msgid "Loop time between checking for new items to schedule for delete." msgstr "" "Periodo di loop tra le verifiche per i nuovi elementi da pianificare per " "l'eliminazione." #, python-format msgid "Malformed Content-Range header: %s" msgstr "Intestazione intervallo-contenuto non corretta: %s" msgid "Malformed JSON in request body." msgstr "JSON non corretto nel corpo della richiesta." msgid "Max string length may not exceed 255 characters" msgstr "La lunghezza massima della stringa non può superare i 255 caratteri" msgid "Maximal age is count of days since epoch." msgstr "L'età massima è il numero di giorni dal periodo." msgid "" "Maximum line size of message headers to be accepted. 
max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "Dimensione massima della riga di intestazioni del messaggio che deve essere " "accettata. max_header_line dovrebbe essere incrementato quando si utilizzano " "token grandi (in genere quelli generati dall'API Keystone v3 con cataloghi " "del servizio di grandi dimensioni" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Numero massimo di membri per immagine. I valori negativi definiscono un " "valore un numero illimitato." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Il numero massimo di ubicazioni consentite su un'immagine. I valori negativi " "definiscono un numero illimitato." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Il numero massimo di proprietà consentite su un'immagine. I valori negativi " "definiscono un numero illimitato." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Il numero massimo di tag consentiti su un'immagine. I valori negativi " "definiscono un valore un numero illimitato." msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Il numero massimo di elementi consentiti che può essere restituito da una " "richiesta" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Il numero massimo di rendirizzamenti (%(redirects)s) è stato superato." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "La dimensione massima in byte dell'immagine che un utente può caricare. 
" "Viene impostata su 1099511627776 byte (1 TB). AVVERTENZA: questo valore deve " "essere aumentato solo dopo un'attenta valutazione e deve essere impostato su " "un valore inferiore a 8 EB (9223372036854775808)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Il membro %(member_id)s è il duplicato dell'immagine %(image_id)s" msgid "Member can't be empty" msgstr "Il membro non può essere vuoto" msgid "Member to be added not specified" msgstr "Membro da aggiungere non specificato" msgid "Membership could not be found." msgstr "Impossibile trovare l'appartenenza." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Lo spazio dei nomi della definizione di metadati %(namespace)s è protetto e " "non è possibile eliminarlo." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "Lo spazio dei nomi della definizione dei metadati per l'id=%s non è stato " "trovato" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "Lo spazio dei nomi della definizione di metadati=%(namespace_name)s non è " "stato trovato." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "L'oggetto di definizione di metadati %(object_name)s è protetto e non è " "possibile eliminarlo." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "" "L'oggetto della definizione dei metadati per l'id=%s non è stato trovato" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La proprietà della definizione di metadati %(property_name)s è protetta e " "non è possibile eliminarlo." 
#, python-format msgid "Metadata definition property not found for id=%s" msgstr "" "La proprietà della definizione dei metadati per l'id=%s non è stata trovata" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Il tipo-risorsa della definizione di metadati %(resource_type_name)s è un " "tipo inserito dalsistema e non è possibile eliminarlo." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "L'associazione-tipo-risorsa della definizione di metadati %(resource_type)s " "è protetta e non può essere eliminata." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Il tag di definizione dei metadati %(tag_name)s è protetto e non può essere " "eliminato." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Il tag di definizione dei metadati per l'id=%s non è stato trovato" msgid "Min string length may not be negative" msgstr "La lunghezza minima della stringa non può essere negativa" msgid "Minimal rows limit is 1." msgstr "Il limite di righe minimo è 1." #, python-format msgid "Missing required credential: %(required)s" msgstr "Credenziale richiesta mancante: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Il servizio 'immagine' multipla corrisponde nella regione %(region)s. Questo " "in genere significa che una regione è obbligatoria e non ne è stata fornita " "una." msgid "Must supply a positive, non-zero value for age." msgstr "È necessario fornire un valore positivo, diverso da zero per l'età." msgid "Name of the paste configuration file." msgstr "Nome del file di configurazione paste." 
#, python-format msgid "No artifact found with ID %s" msgstr "Nessuna risorsa trovata con ID %s" msgid "No authenticated user" msgstr "Nessun utente autenticato" #, python-format msgid "No image found with ID %s" msgstr "Nessuna immagine trovata con ID %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "" "Non è stata trovata nessuna ubicazione con ID %(loc)s dall'immagine %(img)s" msgid "No permission to share that image" msgstr "Nessuna autorizzazione per condividere tale immagine" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "Non è stato caricato nessun plugin per '%(name)s'" msgid "No property to access" msgstr "Nessuna proprietà da accedere" #, python-format msgid "No such key '%s' in a dict" msgstr "Nessuna chiave '%s' in un dict" #, python-format msgid "Not a blob property '%s'" msgstr "Non è una proprietà blob '%s'" msgid "Not a downloadable entity" msgstr "Non è un'entità scaricabile " msgid "Not a list property" msgstr "Non è una proprietà elenco" #, python-format msgid "Not a list property '%s'" msgstr "Non è una proprietà elenco '%s'" msgid "Not a valid value type" msgstr "Non un tipo di valore valido" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Non tutte le dipendenze sono nello stato '%s' " #, python-format msgid "Not allowed to create members for image %s." msgstr "Non è consentito creare membri per l'immagine %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Disattivazione dell'immagine in stato '%s' non consentita" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Non è consentito eliminare i membri dell'immagine %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Non è consentito eliminare i tag dell'immagine %s." #, python-format msgid "Not allowed to list members for image %s." msgstr "Non è consentito elencare i membri dell'immagine %s." 
#, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Riattivazione dell'immagine in stato '%s' non consentita" #, python-format msgid "Not allowed to update members for image %s." msgstr "Non è consentito aggiornare i membri dell'immagine %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Non è consentito aggiornare i tag dell'immagine %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Non è consentito caricare i dati dell'immagine per l'immagine %(image_id)s: " "%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "Non è un indice array '%s' " msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Il numero di directory di ordinamento non corrisponde al numero di chiavi di " "ordinamento" msgid "OVA extract is limited to admin" msgstr "L'estrazione OVA è limitata all'amministratore" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "I metadati OVF di interesse non sono stati specificati nel file di " "configurazione ovf-metadata.json. Impostare \"cim_pasd\" su un elenco di " "CIM_ProcessorAllocationSettingData properties di interesse." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "File di configurazione delle proprietà OVF \"ovf-metadata.json\" non trovato." msgid "Old and new sorting syntax cannot be combined" msgstr "Impossibile combinare la nuova e la precedente sintassi di ordinamento" msgid "Only list indexes are allowed for blob lists" msgstr "Solo gli indici elenco sono consentiti per gli elenchi blob " #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "L'operazione \"%s\" richiede un membro denominato \"value\"." 
msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Gli oggetti dell'operazione devono contenere esattamente un membro " "denominato \"add\", \"remove\" o \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Gli oggetti dell'operazione devono contenere solo un membro denominato \"add" "\", \" remove \" o \"replace\"." msgid "Operations must be JSON objects." msgstr "Le operazioni devono essere oggetti JSON." #, python-format msgid "Operator %(op)s is not supported" msgstr "L'operatore %(op)s non è supportato" #, python-format msgid "Original locations is not empty: %s" msgstr "Le ubicazioni originali non sono vuote: %s" msgid "Owner can't be updated by non admin." msgstr "Il proprietario non può essere aggiornato da un non admin. " msgid "Owner must be specified to create a tag." msgstr "Il proprietario deve specificare per creare un tag." msgid "Owner of the image" msgstr "Proprietario dell'immagine" msgid "Owner of the namespace." msgstr "Proprietario dello spazio dei nomi." msgid "Param values can't contain 4 byte unicode." msgstr "I valori dei parametri non possono contenere 4 byte unicode." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Nome parziale di una pipeline nel file di configurazione paste dove il nome " "servizio è stato rimosso. 
Ad esempio, se il nome sezione paste [pipeline:" "glance-api-keystone] utilizza il valore \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "" "Percorso per la directory in cui sono archiviati i file json di metadati" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "Il nome plugin '%(plugin)s' deve corrispondere al typename della risorsa " "utente '%(name)s'" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Il puntatore `%s` contiene \"~\" che non fa parte di una sequenza escape " "riconosciuta." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Il puntatore `%s` contiene l'adiacente \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Il puntatore `%s` non contiene token valido." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Il puntatore `%s` non inizia con \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Il puntatore `%s` finisce con \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "Il puntatore contiene '~' che non fa parte di una sequenza escape " "riconosciuta [~0, ~1]." #, python-format msgid "Port \"%s\" is not valid." msgstr "La porta \"%s\" non è valida." msgid "Port the registry server is listening on." msgstr "La porta in cui il server del registro è in ascolto." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "Componente numerico prerelease troppo grande (massimo %d caratteri)" msgid "Private key file to use when starting API server securely." msgstr "" "File di chiave privata da utilizzare quando si avvia il server API in modo " "sicuro." #, python-format msgid "Process %d not running" msgstr "Il processo %d non è in esecuzione" #, python-format msgid "Properties %s must be set prior to saving data." 
msgstr "Le proprietà %s devono essere impostate prima di salvare i dati." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "La proprietà %(property_name)s non inizia con il prefisso di associazione " "del tipo di risorsa previsto '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "La proprietà %s è già presente." #, python-format msgid "Property %s does not exist." msgstr "La proprietà %s non esiste." #, python-format msgid "Property %s may not be removed." msgstr "La proprietà %s non può essere rimossa." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La proprietà %s deve essere impostata prima di salvare i dati." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "La proprietà '%(name)s' non può avere un valore '%(val)s': %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "La proprietà '%s' è protetta" msgid "Property names can't contain 4 byte unicode." msgstr "I nomi delle proprietà non possono contenere 4 byte unicode." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "La protezione della proprietà nell'operazione %(operation)s della regola " "%(rule)s non è stata trovata. L'esecuzione di questa operazione non sarà " "consentita ad alcun ruolo." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "Impossibile trovare il valore della proprietà %(prop)s" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "La dimensione dell'immagine fornita deve corrispondere alla dimensione " "dell'immagine memorizzata. 
(dimensione fornita: %(ps)d, dimensione " "memorizzata: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "L'oggetto fornito non corrisponde allo schema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Lo stato dell'attività fornito non è supportato: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Il tipo dell'attività fornito non è supportato: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fornisce una semplice descrizione utente dello spazio dei nomi." msgid "Public images do not have members." msgstr "Le immagini pubblica non hanno membri." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "L'URL pubblico da utilizzare per l'endpoint della versione. Il valore " "predefinito è None, che utilizzerà l'attributo host_url della richiesta per " "popolare la base dell'URL. Se Glance è dietro ad un proxy, sarà necessario " "cambiare questo valore riportando l'URL proxy." msgid "Python module path of data access API" msgstr "Il percorso del modulo Python dell'API di accesso dati" msgid "Received invalid HTTP redirect." msgstr "Ricevuto un reindirizzamento HTTP non valido." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Reindirizzamento a %(uri)s per l'autorizzazione." #, python-format msgid "Registry service can't use %s" msgstr "Il servizio registro non può utilizzare %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Il registro non è stato configurato correttamente sul server API. 
Motivo: " "%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "La relazione %(name)s non può avere più valori" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Ricaricamento di %(serv)s non supportato" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Ricaricamento %(serv)s (pid %(pid)s) con segnale(%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" msgid "Request body must be a JSON array of operation objects." msgstr "" "Il corpo della richiesta deve essere un array JSON degli oggetti " "dell'operazione." msgid "Request must be a list of commands" msgstr "La richiesta deve essere un elenco di comandi" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "Le proprietà dell'immagine richieste per la verifica della firma non " "esistono. Impossibile verificare la firma." #, python-format msgid "Required store %s is invalid" msgstr "Archivio richiesto %s non valido" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "I nomi del tipo di risorsa devono essere allineati con i tipi di risorsa " "Heat quando possibile: http://docs.openstack.org/developer/heat/" "template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La risposta dal Keystone non contiene un endpoint Glance." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Ruolo utilizzato per identificare un utente autenticato come amministratore." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." 
msgstr "" "Eseguire come un processo di lunga elaborazione. Quando non specificato " "(impostazione predefinita) eseguire l'operazione di ripulitura una volta e " "quindi uscire. Quando specificato non uscire ed eseguire la ripulitura ad " "intervalli wakeup_time come specificato nella configurazione." msgid "Scope of image accessibility" msgstr "Ambito di accessibilità dell'immagine" msgid "Scope of namespace accessibility." msgstr "Ambito di accessibilità dello spazio dei nomi." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Chiave segreta da utilizzare per firmare i messaggi di traccia dei servizi " "Glance API e Glance Registry." #, python-format msgid "Server %(serv)s is stopped" msgstr "Il server %(serv)s è stato arrestato" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Creazione dell'operatore server non riuscita: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Impostare un'ampia quota di sistema per ogni utente. Questo valore " "rappresenta la capacità totale che un utente può utilizzare tra tutti i " "sistemi di archiviazione. Un valore pari a 0 indica una capacità illimitata. " "Per un valore è possibile specificare unità opzionali. Le unità accettate " "sono B, KB, MB, GB e TB che rappresentano rispettivamente Byte, KiloByte, " "MegaByte, GigaByte e TeraByte. Se non è specificata nessuna unità, vengono " "utilizzati i Byte. 
Tener presente che non devono essere presenti spazi tra " "il valore e l'unità e le unità sono sensibili al maiuscolo/minuscolo." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "Mostra livello %(shl)s non è supportata in questa operazione" msgid "Signature verification failed" msgstr "Verifica firma non riuscita" msgid "Signature verification failed." msgstr "Verifica firma non riuscita." msgid "Size of image file in bytes" msgstr "Dimensione del file di immagine in byte" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Alcuni tipi di risorsa consentono più di una coppia chiave / valore per " "istanza. Ad esempio, Cinder consente metadati immagine ed utente sui " "volumi. Solo i metadati delle proprietà dell'immagine vengono valutati da " "Nova (pianificazione o driver). Questa proprietà consente una destinazione " "dello spazio dei nomi per eliminare l'ambiguità." msgid "Sort direction supplied was not valid." msgstr "La direzione di ordinamento fornita non è valida." msgid "Sort key supplied was not valid." msgstr "La chiave di ordinamento fornita non è valida." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Specifica il prefisso da utilizzare per il tipo di risorsa fornito. " "Qualsiasi proprietà nello spazio dei nomi deve essere preceduta da un " "prefisso quando viene applicata ad un tipo di risorsa specificato. Deve " "includere un separatore di prefisso (ad esempio due punti :)." msgid "Specifies which task executor to be used to run the task scripts." 
msgstr "" "Specifica quale esecutore attività deve essere utilizzato per eseguire gli " "script di attività." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Lo stato deve essere \"pending\", \"accepted\" o \"rejected\"." msgid "Status not specified" msgstr "Stato non specificato" msgid "Status of the image" msgstr "Stato dell'immagine" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Il passaggio di stato da %(cur_status)s a %(new_status)s non è consentito" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Arresto di %(serv)s in corso (pid %(pid)s) con segnale(%(sig)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "Archivio per image_id non trovato: %s" #, python-format msgid "Store for scheme %s not found" msgstr "Archivio per lo schema %s non trovato" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "%(attr)s (%(supplied)s) e %(attr)s fornito e generato dall'immagine caricata " "(%(actual)s) non corrispondevano. Lo stato dell'immagine viene impostato su " "'killed'." msgid "Supported values for the 'container_format' image attribute" msgstr "Valori supportati per l'attributo di immagine 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valori supportati per l'attributo di immagine 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Respawn soppresso come %(serv)s era %(rsn)s." msgid "System SIGHUP signal received." msgstr "Ricevuto segnale SIGHUP di sistema." 
#, python-format msgid "Task '%s' is required" msgstr "Attività '%s' obbligatoria" msgid "Task does not exist" msgstr "L'attività non esiste" msgid "Task failed due to Internal Error" msgstr "Attività non riuscita a causa di un errore interno " msgid "Task was not configured properly" msgstr "L'attività non è stata configurata correttamente" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "L'attività con l'id fornito %(task_id)s non è stata trovata" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Il filtro \"changes-since\" non è più disponibile su v2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "Il file CA specificato %s non esiste" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "L'oggetto immagine %(image_id)s, in fase di creazione da questa attività " "%(task_id)s, non si trova più in uno stato che ne consenta ulteriori " "elaborazioni." msgid "The Store URI was malformed." msgstr "L'URI della memoria non era corretto." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "L'URL per il servizio keystone. Se \"use_user_token\" non è attiva e si " "utilizza l'auth keystone, è possibile specificare l'URL di keystone." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "" "L'indirizzo in cui il servizio di autenticazione Swift è in ascolto." "(obsoleto)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "La password degli amministratori. Se non è attiva \"use_user_token\", è " "possibile specificare le credenziali admin." msgid "" "The administrators user name. 
If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Il nome utente degli amministratori. Se non è attiva \"use_user_token\" , è " "possibile specificare le credenziali admin." msgid "The amount of time in seconds to delay before performing a delete." msgstr "" "La quantità di tempo in secondi da posticipare prima di eseguire " "un'eliminazione." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "La quantità di tempo in cui un'immagine incompleta può rimanere nella cache " "prima che il ripulitore cache, se in esecuzione, rimuoverà l'immagine " "incompleta." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "" "Il valore backlog che verrà utilizzato durante la creazione del socket " "listener TCP." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Il file certificato specificato %s non esiste" msgid "The config file that has the swift account(s)configs." msgstr "" "Il file di configurazione che contiene le configurazioni account Swift." msgid "The current status of this task" msgstr "Lo stato corrente di questa attività" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "L'unità in cui si trova la directory cache dell'immagine %(image_cache_dir)s " "non supporta xattr. Probabilmente è necessario modificare fstab e aggiungere " "l'opzione user_xattr nella riga appropriata per l'unità che ospita la " "directory cache." msgid "The driver to use for image cache management." msgstr "Il driver da utilizzare per la gestione della cache dell'immagine." #, python-format msgid "The format of the version %s is not valid. 
Use semver notation" msgstr "" "Il formato della versione %s non è valido. Utilizzare la notazione semver" msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "Il formato in cui le immagini verranno convertite automaticamente. Quando " "si utilizza il backend RBD, deve essere impostato su 'raw'" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "L'URI fornito non è valido. Specificare un URI valido dal seguente elenco di " "uri supportati %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" "Il nome host/IP del processo pydev in ascolto per le connessioni di debug" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "L'immagine %s è già presente sul server slave ma non è stata trovata durante " "il controllo. Ciò indica che non si dispone delle autorizzazioni necessarie " "per visualizzare tutte le immagini sul server slave." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "La risorsa in entrata è troppo grande: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "L'immagine in entrata è troppo grande: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Il file chiave specificato %s non esiste" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di ubicazioni immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. 
Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di membri dell'immagine consentito è stato superato in questa " "immagine. Tentato: %(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di proprietà immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "Il limite di proprietà immagine consentito è stato superato. Tentato: " "%(num)s, Massimo: %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Il limite di tag immagine consentito è stato superato. Tentato: " "%(attempted)s, Massimo: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "L'ubicazione %(location)s esiste già" #, python-format msgid "The location data has an invalid ID: %d" msgstr "I dati dell'ubicazione hanno un ID non valido: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "Il percorso del file di protezione delle proprietà. Tale file contiene le " "regole per le protezioni delle proprietà ed i ruoli/politiche associate. Se " "questo valore di configurazione non viene specificato, per impostazione " "predefinita le protezioni delle proprietà non verranno applicate. Se viene " "specificato un valore ed il file non viene trovato, il servizio glance-api " "non verrà avviato." 
#, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "La definizione di metadati %(record_type)s con nome=%(record_name)s non è " "eliminata. Altri record ancora fanno riferimento a tale definizione." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Lo spazio dei nomi della definizione di metadati =%(namespace_name)s già " "esiste." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "L'oggetto della definizione di metadati con nome=%(object_name)s non è stato " "trovato nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "La proprietà della definizione di metadati con nome=%(property_name)s non è " "stata trovata nello spazio dei nomi=%(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" "%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s già esiste." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "L'associazione tipo-risorsa della definizione di metadati del tipo-risorsa=" "%(resource_type_name)s per lo spazio dei nomi=%(namespace_name)s, non è " "stata trovata." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Il tipo-risorsa della definizione di metadati con nome=" "%(resource_type_name)s, non è stato trovato." 
#, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Il tag di definizione dei metadati con nome=%(name)s non è stato trovato " "nello spazio dei nomi=%(namespace_name)s." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "La modalità in cui verrà eseguito il motore. Può essere 'serial' o " "'parallel'." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "Il numero di operatori del processo secondario che verrà creato per " "soddisfare le richieste. Il valore predefinito sarà uguale al numero di CPU " "disponibili." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "Il numero di attività parallele eseguite contemporaneamente dal motore. Il " "valore può essere maggiore di uno quando la modalità del motore è " "'parallel'." msgid "The parameters required by task, JSON blob" msgstr "I parametri richiesti dall'attività, blob JSON" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "Il percorso per il file cert da utilizzare nelle connessioni SSL al server " "del registro, se presente. In alternativa è possibile impostare la variabile " "di ambiente GLANCE_CLIENT_CERT_FILE sul percorso del file cert CA." msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." 
msgstr "" "Il percorso per il file cert dell'autorità di certificazione da utilizzare " "nelle connessioni SSL al server del registro, se presente. In alternativa è " "possibile impostare la variabile di ambiente GLANCE_CLIENT_CA_FILE su un " "percorso del file cert CA." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "Il percorso del file di chiavi da utilizzare nelle connessioni SSL al server " "di registro, se presente. In alternativa, è possibile impostare la variabile " "di ambiente GLANCE_CLIENT_KEY_FILE su un percorso del file di chiavi" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "Il percorso al database del file sqlite che verrà utilizzato per la gestione " "della cache dell'immagine." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "Il periodo di tempo, in secondi, che il server API attende affinché una " "richiesta del registro venga completata. Un valore pari a 0 indica nessun " "timeout." msgid "The port on which a pydev process is listening for connections." msgstr "La porta in cui il processo pydev è in ascolto per le connessioni." msgid "The port on which the server will listen." msgstr "La porta su cui il server sarà in ascolto." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "Il protocollo da utilizzare per le comunicazioni con il server di registro. " "Http o https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" "Il corpo fornito %(body)s non è valido nello schema fornito: %(schema)s" msgid "The provided image is too large." msgstr "L'immagine fornita è troppo grande." 
#, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "Il percorso fornito '%(path)s' non è valido: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "Il riferimento ai parametri di memorizzazione di account/backing Swift " "predefiniti per utilizzare l'aggiunta di nuove immagini." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "La regione per il servizio di autenticazione. Se \"use_user_token\" non è " "attiva e si utilizza l'autenticazione keystone, è possibile specificare il " "nome regione." msgid "The request returned 500 Internal Server Error." msgstr "La richiesta ha restituito 500 Errore interno del server." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La richiesta ha restituito 503 Servizio non disponibile. Ciò " "generalmente si verifica nel sovraccarico del servizio o altro tipo di " "interruzione temporanea." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La richiesta ha restituito 302 scelte multiple. Questo generalmente indica " "che non è stato incluso un indicatore di versione in un URI della " "richiesta.\n" "\n" "Restituito il corpo della risposta:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La richiesta ha restituito 413 Entità della richiesta troppo grande. 
Questo " "generalmente significa che il limite della velocità o la soglia della quota " "sono stati violati.\n" "\n" "Il corpo della risposta \n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La richiesta ha restituito uno stato imprevisto: %(status)s.\n" "\n" "Il corpo della risposta \n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "L'immagine richiesta è stata disattivata. Il download dei dati immagine non " "è consentito." msgid "The result of current task, JSON blob" msgstr "Il risultato dell'attività corrente, blob JSON" msgid "The signature data was not properly encoded using base64" msgstr "I dati di firma non sono stati codificati correttamente tramite base64" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "La dimensione dei dati %(image_size)s supererà il limite. %(remaining)s byte " "rimanenti." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "La dimensione del pool di thread da utilizzare per la ripulitura delle " "immagini. Il valore predefinito è uno, che indica la ripulitura di serie. " "Qualsiasi valore superiore a uno indica il numero massimo di immagini che " "possono essere ripulite in parallelo. 
" #, python-format msgid "The specified member %s could not be found" msgstr "Impossibile trovare il membro specificato %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Impossibile trovare l'oggetto di metadati %s specificato" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Impossibile trovare il tag di metadati %s specificato" #, python-format msgid "The specified namespace %s could not be found" msgstr "Impossibile trovare lo spazio dei nomi %s specificato" #, python-format msgid "The specified property %s could not be found" msgstr "Impossibile trovare la proprietà %s specificata" #, python-format msgid "The specified resource type %s could not be found " msgstr "Impossibile trovare il tipo di risorsa %s specificato " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " "'pending_delete' o 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Lo stato dell'ubicazione immagine eliminata può essere impostata solo su " "'pending_delete' o 'deleted'." msgid "The status of this image member" msgstr "Lo stato di questo membro dell'immagine" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "I nomi degli archivi da utilizzare per ottenere l'ordine di preferenza degli " "archivi. Il nome deve essere registrato da uno degli archivi definiti " "dall'opzione di configurazione 'stores'. 
Questa opzione verrà applicata " "quando si utilizza l'opzione 'store_type' come strategia di ubicazione " "dell'immagine definita dall'opzione di configurazione 'location_strategy'." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "La strategia da utilizzare per l'autenticazione. Se \"use_user_token\" non è " "attiva è possibile specificare la strategia di autenticazione." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Il membro di destinazione %(member_id)s è già associato all'immagine " "%(image_id)s." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "Il nome tenant dell'utente amministrativo. Se \"use_user_token\" non è " "attiva è possibile specificare il nome tenant admin." msgid "The type of task represented by this content" msgstr "Il tipo di attività rappresentata da questo contenuto" msgid "The unique namespace text." msgstr "Il testo dello spazio dei nomi univoco." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "Il limite superiore (dimensione massima della cache accumulata in byte) " "oltre il quale il programma di eliminazione cache, se in esecuzione, inizia " "a ripulire la cache delle immagini." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Il nome utente semplice per lo spazio dei nomi. Utilizzato dalla UI se " "disponibile." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "L'utente da autenticare per il servizio di autenticazione Swift (obsoleto)" msgid "" "The value for the socket option TCP_KEEPIDLE. 
This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "Il valore dell'opzione socket TCP_KEEPIDLE. Rappresenta il periodo di tempo, " "in secondi, per cui la connessione deve essere inattiva prima che il TCP " "inizi ad inviare probe keepalive." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "Si è verificato un problema in %(error_key_name)s %(error_filename)s. " "Verificare. Errore: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Si è verificato un problema in %(error_key_name)s %(error_filename)s. " "Verificare. Errore OpenSSL: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Si è verificato un problema con la coppia di chiavi. Verificare che il cert " "%(cert_file)s e la chiave %(key_file)s siano collegati. Errore OpenSSL " "%(ce)s" msgid "There was an error configuring the client." msgstr "Si è verificato un errore durante la configurazione del client." msgid "There was an error connecting to a server" msgstr "Si è verificato un errore durante la connessione al server" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Questo valore di configurazione indica se si utilizzano i \"ruoli\" o le " "\"politiche\" nel file di protezione delle proprietà." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Questa operazione non è attualmente consentita nelle attività Glance. " "Vengono automaticamente eliminate al raggiungimento dell'ora in base alla " "proprietà expires_at." 
msgid "This operation is currently not permitted on Glance images details." msgstr "" "Questa operazione non è attualmente consentita nei dettagli delle immagini " "Glance." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Questo valore imposta la strategia che verrà utilizzata per determinare " "l'ordine di ubicazione dell'immagine. Attualmente due strategie sono " "inserite nel package assieme a Glance 'location_order' e 'store_type'." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Periodo di tempo, in ore, per cui l'attività prosegue dopo l'esito positivo " "o meno" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Timeout per le operazioni socket delle connessioni client. Se una " "connessione in entrata è inattiva per questo numero di secondi, verrà " "chiusa. Il valore 0 indica un'attesa illimitata." msgid "Too few arguments." msgstr "Troppo pochi argomenti." msgid "Too few locations" msgstr "Troppo poche ubicazioni" msgid "Too many locations" msgstr "Troppe ubicazioni" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "La dimensione totale è di %(size)d byte tra le immagini %(img_count)d" msgid "Turn on/off delayed delete." msgstr "Attiva/disattiva eliminazione posticipata." 
msgid "Type version has to be a valid semver string" msgstr "La versione del tipo deve essere una stringa semver valida" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "L'URI non può contenere più di una ricorrenza di uno schema. Se è stato " "specificato un URI come swift://user:pass@http://authurl.com/v1/container/" "obj, è necessario modificarlo per utilizzare lo schema swift+http://, come: " "swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "URL per accedere al file di immagini tenuto nell'archivio esterno" msgid "Unable to PUT to non-empty collection" msgstr "Impossibile eseguire PUT in una raccolta non vuota " #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossibile creare il file pid %(pid)s. Eseguire come non-root?\n" "Ritorno a un file temporaneo; è possibile arrestare il servizio %(service)s " "utilizzando:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Impossibile filtrare mediante un operatore sconosciuto '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "" "Impossibile filtrare in base a un intervallo con un valore non numerico." msgid "Unable to filter on a unknown operator." msgstr "Impossibile filtrare su un operatore sconosciuto." msgid "Unable to filter using the specified operator." msgstr "Impossibile filtrare utilizzando l'operatore specificato." msgid "Unable to filter using the specified range." msgstr "Impossibile filtrare utilizzando l'intervallo specificato." 
#, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Impossibile trovare '%s' nella modifica dello schema JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Impossibile trovare `op` in modifica schema JSON. Deve essere uno dei " "seguenti: %(available)s." msgid "Unable to get legacy image" msgstr "Impossibile ottenere l'immagine legacy" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Impossibile aumentare il limite del descrittore di file. Eseguire come non-" "root?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Impossibile caricare %(app_name)s dal file di configurazione %(conf_file)s.\n" "Ricevuto: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Impossibile caricare lo schema: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Impossibile individuare il file di configurazione paste per %s." 
msgid "Unable to modify collection in immutable or readonly property" msgstr "" "Impossibile modificare la raccolta in una proprietà immutabile o in sola " "lettura" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "Impossibile richiamare il certificato con ID: %s " msgid "Unable to retrieve request id from context" msgstr "Impossibile recuperare l'ID richiesta dal contesto" msgid "Unable to specify artifact type explicitly" msgstr "Impossibile specificare il tipo di risorsa utente esplicitamente" msgid "Unable to specify artifact type version explicitly" msgstr "" "Impossibile specificare la versione del tipo di risorsa utente esplicitamente" msgid "Unable to specify version if multiple types are possible" msgstr "Impossibile specificare la versione se sono possibili più tipi" msgid "Unable to specify version if type is not specified" msgstr "Impossibile specificare la versione se non è specificato il tipo" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "Impossibile caricare i dati dell'immagine duplicata per l'immagine " "%(image_id)s: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "Impossibile verificare la firma perché l'algoritmo non è supportato su " "questo sistema" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "Impossibile verificare la firma: %(reason)s " msgid "Unauthorized image access" msgstr "Accesso all'immagine non autorizzato" msgid "Unexpected body type. Expected list/dict." msgstr "Tipo di corpo imprevisto. Elenco/dizionario previsto." 
#, python-format msgid "Unexpected response: %s" msgstr "Risposta imprevista: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Strategia di autenticazione sconosciuta '%s'" #, python-format msgid "Unknown command: %s" msgstr "Comando sconosciuto: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Direzione ordinamento sconosciuta, deve essere 'desc' o 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Versione della bozza dello schema JSON non riconosciuta" msgid "Unrecognized changes-since value" msgstr "Valore changes-since non riconosciuto" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "sort_dir non supportato. Valori consentiti: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "sort_key non supportato. Valori consentiti: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "Valore %(value)d fuori dall'intervallo, non deve superare %(max)d" msgid "Value is greater than maximum" msgstr "Il valore è superiore al massimo" msgid "Value is less than minimum" msgstr "Il valore è inferiore al minimo" msgid "Value is required" msgstr "È necessario un valore" #, python-format msgid "Version component is too large (%d max)" msgstr "Componente della versione troppo grande (%d massimo)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "La versione non è valida: %(reason)s" msgid "Virtual size of image in bytes" msgstr "Dimensione virtuale dell'immagine in byte" msgid "Visibility must be either \"public\" or \"private\"" msgstr "La visibilità deve essere \"public\" o \"private\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "Entro 15 secondi il pid %(pid)s (%(file)s) verrà interrotto; terminato" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." 
msgstr "" "Quando false, non può essere caricata nessuna risorsa utente " "indipendentemente da available_plugins. Quando true, le risorse utente " "possono essere caricate." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Quando si esegue il server in modalità SSL, è necessario specificare sia un " "valore dell'opzione cert_file che key_file nel file di configurazione" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Quando è true, questa opzione imposta il proprietario di un'immagine " "affinché diventi tenant. In caso contrario, il proprietario dell'immagine " "sarà l'utente autenticato che avvia la richiesta." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "Quando si utilizza SSL nelle connessioni al server di registro, non " "richiedere la convalida tramite un'autorità di certificazione. Ciò equivale " "alla specifica di --insecure sulla riga comandi utilizzando glanceclient per " "l'API." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Indica se consentire agli utenti di specificare le proprietà dell'immagine " "al di là di quanto fornisce lo schema dell'immagine" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." 
msgstr "" "Indica se includere i percorsi delle immagini di backend nelle proprietà " "dell'immagine. Ad esempio, se viene utilizzato l'archivio del file system, " "l'URL \"file:///percorso/immagine\" viene restituito all'utente nel campo di " "metadati 'direct_url'. L'indicazione del percorso di archiviazione può " "rappresentare un rischio per la sicurezza, pertanto utilizzare questa " "impostazione con attenzione. L'impostazione su true sostituisce l'opzione " "show_image_direct_url." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "Indica se includere l'ubicazione dell'archivio di immagini di backend nelle " "proprietà dell'immagine. La rivelazione dell'ubicazione dell'archivio può " "essere un rischio per la sicurezza, quindi utilizzare questa impostazione " "con cautela!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Indica se passare tramite le intestazioni che contengono le informazioni " "utente e tenant quando si effettuano richieste al registro. Ciò consente al " "registro di utilizzare il middleware di contesto senza il middleware " "auth_token di keystonemiddleware, rimuovendo le chiamate sul servizio " "keystone auth. Si raccomanda che quando si utilizza questa opzione, la " "comunicazione sicura tra l'api glance e il registro glance sia garantita da " "mezzi diversi dal middleware auth_token." msgid "" "Whether to pass through the user token when making requests to the registry. 
" "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Se passare o meno attraverso il token utente quando si effettuano richieste " "al registro. Per impedire problemi con la scadenza del token durante il " "caricamento di file grandi, si consiglia di impostare questo parametro su " "False. Se \"use_user_token\" non è in vigore, è possibile specificare le " "credenziali admin. " msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Directory di lavoro per le operazioni delle attività asincrone. La directory " "impostata in questo punto verrà utilizzata per eseguire operazioni sulle " "immagini - normalmente, prima che vengano importate nell'archivio di " "destinazione. Quando viene fornita la directory di lavoro, verificare che " "sia fornito spazio sufficiente per consentire di eseguire le attività " "simultanee in modo efficace senza esaurire lo spazio. 
È possibile fare una " "stima approssimativa moltiplicando il numero di `max_workers` - o il numero " "N di worker in esecuzione - per la dimensione media delle immagini (ad " "esempio, 500 MB). La stima della dimensione delle immagini deve essere " "eseguita in base alla dimensione media nella propria distribuzione. Notare " "che in base alle attività in esecuzione potrebbe essere necessario " "moltiplicare tale numero per un fattore che dipende dalle operazioni " "eseguite dall'attività. Ad esempio, è possibile che si desideri raddoppiare " "la dimensione disponibile se è abilitata la conversione dell'immagine. Fatta " "questa premessa, ricordare che queste sono solo stime, effettuate in base " "allo scenario peggiore; pertanto, è necessario essere preparati ad agire " "diversamente nel caso non siano corrette." #, python-format msgid "Wrong command structure: %s" msgstr "Struttura del comando errata: %s" msgid "You are not authenticated." msgstr "L'utente non è autenticato." msgid "You are not authorized to complete this action." msgstr "Non si è autorizzati a completare questa azione." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Non si è autorizzati a ricercare l'immagine %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Non si è autorizzati a ricercare i membri dell'immagine %s. " #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "L'utente non dispone dell'autorizzazione per creare un tag lo spazio dei " "nomi posseduto da '%s'" msgid "You are not permitted to create image members for the image." msgstr "Non si è autorizzati a creare membri dell'immagine per l'immagine." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Non si è autorizzati a creare immagini di proprietà di '%s'." 
#, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "" "L'utente non dispone dell'autorizzazione per creare lo spazio dei nomi " "posseduto da '%s'" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "" "L'utente non dispone dell'autorizzazione per creare l'oggetto posseduto da " "'%s'" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "" "L'utente non dispone dell'autorizzazione per creare la proprietà posseduta " "da '%s'" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "" "L'utente non dispone dell'autorizzazione per creare il tipo_risorsa " "posseduto da '%s'" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "Non si è autorizzati a creare questa attività con proprietario: %s" msgid "You are not permitted to deactivate this image." msgstr "Non si è autorizzati a disattivare questa immagine." msgid "You are not permitted to delete this image." msgstr "Non si è autorizzati a eliminare questa immagine." msgid "You are not permitted to delete this meta_resource_type." msgstr "" "L'utente non dispone dell'autorizzazione per eliminare questo " "tipo_risorsa_metadati." msgid "You are not permitted to delete this namespace." msgstr "" "L'utente non dispone dell'autorizzazione per eliminare questo spazio dei " "nomi." msgid "You are not permitted to delete this object." msgstr "L'utente non dispone dell'autorizzazione per eliminare questo oggetto." msgid "You are not permitted to delete this property." msgstr "" "L'utente non dispone dell'autorizzazione per eliminare questa proprietà." msgid "You are not permitted to delete this tag." msgstr "L'utente non dispone dell'autorizzazione per eliminare questo tag." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "Non si è autorizzati a modificare '%(attr)s' in questa %(resource)s." 
#, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Non si è autorizzati a modificare '%s' in questa immagine." msgid "You are not permitted to modify locations for this image." msgstr "Non si è autorizzati a modificare le ubicazioni per questa immagine." msgid "You are not permitted to modify tags on this image." msgstr "Non si è autorizzati a modificare i tag in questa immagine." msgid "You are not permitted to modify this image." msgstr "Non si è autorizzati a modificare questa immagine." msgid "You are not permitted to reactivate this image." msgstr "Non si è autorizzati a riattivare questa immagine." msgid "You are not permitted to set status on this task." msgstr "Non si è autorizzati ad impostare lo stato in questa attività." msgid "You are not permitted to update this namespace." msgstr "" "L'utente non dispone dell'autorizzazione per aggiornare questo spazio dei " "nomi." msgid "You are not permitted to update this object." msgstr "" "L'utente non dispone dell'autorizzazione per aggiornare questo oggetto." msgid "You are not permitted to update this property." msgstr "" "L'utente non dispone dell'autorizzazione per aggiornare questa proprietà." msgid "You are not permitted to update this tag." msgstr "L'utente non dispone dell'autorizzazione per aggiornare questo tag." msgid "You are not permitted to upload data for this image." msgstr "Non si è autorizzati a caricare i dati per questa immagine." 
#, python-format msgid "You cannot add image member for %s" msgstr "Non è possibile aggiungere il membro dell'immagine per %s" #, python-format msgid "You cannot delete image member for %s" msgstr "Non è possibile eliminare il membro dell'immagine per %s" #, python-format msgid "You cannot get image member for %s" msgstr "Non è possibile ottenere il membro dell'immagine per %s" #, python-format msgid "You cannot update image member %s" msgstr "Non è possibile aggiornare il membro dell'immagine %s" msgid "You do not own this image" msgstr "Non si possiede tale immagine" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Si è scelto di utilizzare nella connessione SSL ed è stato fornito un " "certificato, tuttavia non è stato fornito un parametro key_file o la " "variabile di ambiente GLANCE_CLIENT_KEY_FILE non è stata impostata" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Si è scelto di utilizzare SSL nella connessione e si è fornita una chiave, " "tuttavia non è stato fornito un parametro cert_file parameter o la variabile " "di ambiente GLANCE_CLIENT_CERT_FILE non è stata impostata" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() ha ricevuto l'argomento di parole chiave '%s' non previsto" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Impossibile passare da %(current)s a %(next)s in fase di aggiornamento " 
"(richiesto from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "le proprietà personalizzate (%(props)s) sono in conflitto con le proprietà " "di base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Su questa piattaforma non sono disponibili hub 'poll' e 'selects' eventlog" msgid "is_public must be None, True, or False" msgstr "is_public deve essere None, True, o False" msgid "limit param must be an integer" msgstr "parametro limite deve essere un numero intero" msgid "limit param must be positive" msgstr "parametro limite deve essere positivo" #, python-format msgid "location: %s data lost" msgstr "ubicazione: %s dati persi" msgid "md5 hash of image contents." msgstr "hash md5 del contenuto dell'immagine. " #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() ha ricevuto parole chiave %s non previste" msgid "protected must be True, or False" msgstr "protetto deve essere True o False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "impossibile avviare %(serv)s. Si è verificato l'errore: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id è troppo lungo, dimensione max %s " glance-12.0.0/glance/locale/de/0000775000567000056710000000000012701407204017241 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/de/LC_MESSAGES/0000775000567000056710000000000012701407204021026 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/de/LC_MESSAGES/glance.po0000664000567000056710000035041312701407051022625 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Carsten Duch , 2014 # Ettore Atalan , 2014 # Laera Loris , 2013 # Robert Simai, 2014 # Frank Kloeker , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev1\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-17 15:26+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-16 08:38+0000\n" "Last-Translator: Tom Cocozzello \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s müssen Zeichenketten sein" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s ist erforderlich" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s dürfen nicht länger sein als %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s dürfen nicht kürzer sein als %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s muss dem Muster %(pattern)s entsprechen" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "Eine %(cls)s-Ausnahme ist im letzten RPC-Aufruf aufgetreten: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s in der Mitgliedsliste des Abbild %(i_id)s nicht gefunden." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) läuft..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s scheint bereits aktiv zu sein: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s ist als Modul doppelt registriert. %(module)s wird nicht " "verwendet." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Could not load the " "filesystem store" msgstr "" "%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. Laden " "des Dateisystemspeichers nicht möglich" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s von %(task_type)s sind nicht ordnungsgemäß konfiguriert. " "Fehlendes Arbeitsverzeichnis: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)sing %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(serv)s mit %(conf)s %(verb)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Geben Sie ein Host:Port-Paar an, wobei 'Host' eine IPv4-Adresse, eine " "IPv6-Adresse, ein Hostname oder ein vollständig qualifizierter Domänenname " "ist. Bei Verwendung einer IPv6-Adresse schließen Sie diese in Klammern ein, " "damit sie vom Port getrennt ist (d. h. \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s darf keine 4-Byte-Unicode-Zeichen enthalten." #, python-format msgid "%s is already stopped" msgstr "%s ist bereits gestoppt" #, python-format msgid "%s is stopped" msgstr "%s ist gestoppt" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "" "Wert von '%(param)s' außerhalb des gültigen Bereichs. Darf %(max)d nicht " "überschreiten" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Option --os_auth_url oder Umgebungsvariable OS_AUTH_URL erforderlich, wenn " "die Keystone-Authentifizierungsstrategie aktiviert ist\n" msgid "A body is not expected with this request." msgstr "Es wird kein Body bei dieser Anforderung erwartet. 
" msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "Eine Liste der zugelassenen Artefakte im Format 'Name' oder 'Name-Version'. " "Wenn die Liste leer ist, kann jedes beliebige Artefakt geladen werden." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Ein Metadatendefinitionsobjekt namens %(object_name)s ist bereits in " "Namensbereich %(namespace_name)s nicht gefunden." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Eine Metadatendefinitionseigenschaft namens %(property_name)s ist bereits in " "Namensbereich %(namespace_name)s vorhanden. " #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "Ein Ressourcentyp %(resource_type_name)s der Metadatendefinition ist bereits " "vorhanden. " #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Ein Metadatentag namens %(name)s ist bereits in Namensbereich " "%(namespace_name)s nicht gefunden." msgid "A set of URLs to access the image file kept in external store" msgstr "URLs für den Zugriff auf die Abbilddatei im externen Speicher" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "AES-Schlüssel für die Verschlüsselung der Metadaten von Speicher 'location'. " "Dazu gehören, wenn verwendet, Swift- oder S3-Berechtigungsnachweise. Hierfür " "sollte eine zufällige Zeichenfolge mit der Länge 16, 24 oder 32 Byte " "festgelegt werden. " msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." 
msgstr "" "Adresse zum Binden des Servers. Nützlich bei der Auswahl einer bestimmten " "Netzwerkschnittstelle." msgid "Address to find the registry server." msgstr "Adresse des Registrierungsserver." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Erlaube nicht angemeldeten Benutzern nur-lese Zugriff auf das API. Dies " "trifft nur bei der Verwendung von ContextMiddleware zu." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "" "Die zulässigen Werte %s sind unter den angegebenen Validatoren ungültig" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Menge an Plattenspeicher (in GB), die zum Booten des Abbildes erforderlich " "ist." msgid "Amount of ram (in MB) required to boot image." msgstr "" "Menge an Arbeitsspeicher (in MB), die zum Booten des Abbildes erforderlich " "ist." msgid "An identifier for the image" msgstr "Eine ID für das Abbild" msgid "An identifier for the image member (tenantId)" msgstr "Eine ID für das Abbildelement (tenantId)" msgid "An identifier for the owner of this task" msgstr "Eine ID für den Eigentümer diesen Tasks" msgid "An identifier for the task" msgstr "Eine ID für die Task" msgid "An image file url" msgstr "URL der Abbilddatei" msgid "An image schema url" msgstr "URL des Abbildschemas" msgid "An image self url" msgstr "'self'-URL für Abbild" #, python-format msgid "An image with identifier %s already exists" msgstr "Ein Abbild mit ID %s ist bereits vorhanden" msgid "An import task exception occurred" msgstr "Es ist eine Ausnahme bei einer Importtask eingetreten." msgid "An object with the same identifier already exists." msgstr "Ein Objekt mit der gleichen ID ist bereits vorhanden." msgid "An object with the same identifier is currently being operated on." msgstr "An einem Objekt mit dieser ID wird derzeit eine Operation ausgeführt. 
" msgid "An object with the specified identifier was not found." msgstr "Ein Objekt mit der angegebenen ID wurde nicht gefunden." msgid "An unknown exception occurred" msgstr "Eine unbekannte Ausnahme ist aufgetreten" msgid "An unknown task exception occurred" msgstr "Eine unbekannte Taskausnahme ist aufgetreten" #, python-format msgid "Array has no element at position %d" msgstr "Array enthält kein Element an Position %d" msgid "Array property can't have item_type=Array" msgstr "" "Eigenschaft für Platteneinheit kann nicht über item_type=Array verfügen" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "Artefakt %s konnte nicht gelöscht werden, da es verwendet wird: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "Das Artefakt kann den Status nicht von %(source)s in %(target)s ändern" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "Artefakt überschreitet die Speicherkontingent: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "Das Artefakt verfügt über keine Eigenschaft %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "Artefaktstatus kann nicht von %(curr)s in %(to)s geändert werden" #, python-format msgid "Artifact storage media is full: %s" msgstr "Artefaktspeichermedium ist voll: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" "Artefakttyp mit dem Namen '%(name)s' und der Version '%(version)s' ist " "unbekannt" msgid "Artifact with a circular dependency can not be created" msgstr "Artefakt mit einer Schleifenabhängigkeit kann nicht erstellt werden" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "Artefakt mit ID %(id)s ist nicht zugänglich" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "Artefakt mit ID %(id)s wurde nicht gefunden" msgid "Artifact with the specified 
type, name and version already exists" msgstr "" "Ein Artefakt mit dem angegebenen Typ und Namen und der angegebenen Version " "ist bereits vorhanden" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "Das Artefakt mit dem angegebenen Typ und Namen und der angegebenen Version " "verfügt bereits über die direkte Abhängigkeit %(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "Das Artefakt mit dem angegebenen Typ und Namen und der angegebenen Version " "verfügt bereits über die transitive Abhängigkeit %(dep)s" msgid "Attempt to set readonly property" msgstr "Versuch, eine schreibgeschützte Eigenschaft festzulegen" msgid "Attempt to set value of immutable property" msgstr "Versuch, den Wert einer nicht veränderbaren Eigenschaft festzulegen" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Versuch doppeltes Abbild hochzuladen: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Versuch, Adressfeld für ein Abbild zu aktualisieren, das sich nicht im " "Warteschlangenmodus befindet." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "Attribut '%(property)s' ist schreibgeschützt." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "Attribut '%(property)s' ist reserviert." #, python-format msgid "Attribute '%s' is read-only." msgstr "Attribut '%s' ist schreibgeschützt." #, python-format msgid "Attribute '%s' is reserved." msgstr "Attribut '%s' ist reserviert." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "Attribut 'container_format' kann nur durch ein Abbild in der Warteschlange " "ersetzt werden. " msgid "Attribute disk_format can be only replaced for a queued image." 
msgstr "" "Attribut 'disk_format' kann nur durch ein Abbild in der Warteschlange " "ersetzt werden. " msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" "Authentifizierungsschlüssel für den Benutzer, der beim Swift-" "Authentifizierungsservice authentifiziert wird. (veraltet)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Authentifizierungsservice unter URL %(url)s nicht gefunden." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Authentifizierungsfehler: Das Token ist möglicherweise beim Hochladen der " "Datei abgelaufen. Die Abbilddaten für %s werden gelöscht." msgid "Authorization failed." msgstr "Authorisierung fehlgeschlagen." msgid "Available categories:" msgstr "Verfügbare Kategorien:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Falsches \"%s\"-Abfragefilterformat. Verwenden Sie die ISO 8601 DateTime-" "Notation." #, python-format msgid "Bad Command: %s" msgstr "Fehlerhaftes Kommando: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "Fehlerhafter Header: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Falscher an Filter %(filter)s übergebener Wert hat %(val)s abgerufen" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "Falsches Format der S3 URI: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Fehlerhafter Berechtigungsnachweis '%(creds)s' in Swift-URI" msgid "Badly formed credentials in Swift URI." msgstr "Fehlerhafter Berechtigungsnachweis in Swift-URI." msgid "Base directory that the image cache uses." msgstr "Stammverzeichnis für den Abbild-Zwischenspeicher." msgid "BinaryObject property cannot be declared mutable" msgstr "" "Die BinaryObject-Eigenschaft kann nicht als veränderbar deklariert werden. 
" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "" "Das Blob-Objekt %(name)s verfügt möglicherweise nicht über mehrere Werte" msgid "Blob size is not set" msgstr "Blob-Objekt-Größe nicht festgelegt" msgid "Body expected in request." msgstr "Text in Anforderung erwartet." msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "Datei und legacy_image_id dürfen nicht gleichzeitig angegeben werden" msgid "CA certificate file to use to verify connecting clients." msgstr "CA-Zertifikatsdatei zur Überprüfung der sich verbindenden Clients." msgid "Cannot be a negative value" msgstr "Darf kein negativer Wert sein" msgid "Cannot be a negative value." msgstr "Darf kein negativer Wert sein." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "" "Abbild %(key)s '%(value)s' kann nicht in eine Ganzzahl konvertiert werden. " msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "Es kann keine Artefakteigenschaft mit dem reservierten Namen 'metadata' " "deklariert werden" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "Das Artefakt '%(name)s' kann nicht geladen werden" msgid "Cannot remove last location in the image." msgstr "Die letzte Position im Abbild kann nicht entfernt werden. " #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "" "Daten für Abbild %(image_id)s können nicht gespeichert werden: %(error)s" msgid "Cannot set locations to empty list." msgstr "Positionen können nicht auf leere Liste gesetzt werden. 
" msgid "Cannot specify 'max_size' explicitly" msgstr "'max_size' kann nicht explizit angegeben werden" msgid "Cannot specify 'min_size' explicitly" msgstr "'min_size' kann nicht explizit angegeben werden" msgid "Cannot upload to an unqueued image" msgstr "" "Hochladen auf Abbild, das sich nicht in Warteschlange befindet, nicht möglich" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "Dieser Parameter kann nicht mit dem Operator %(op)s verwendet werden" msgid "Certificate file to use when starting API server securely." msgstr "Zertifikatsdatei zum sicheren Start des API-Servers." #, python-format msgid "Certificate format not supported: %s" msgstr "Das Zertifikatformat wird nicht unterstützt: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "Das Zertifikat ist nicht mehr gültig ab: %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "Das Zertifikat ist erst gültig ab: %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Verifizierung von Kontrollsumme fehlgeschlagen. Zwischenspeichern von Image " "'%s' abgebrochen." msgid "Client disconnected before sending all data to backend" msgstr "" "Die Verbindung zum Client wurde beendet, bevor alle Daten zum Backend " "geschickt wurden" msgid "Command not found" msgstr "Kommando nicht gefunden" msgid "Configuration option was not valid" msgstr "Konfigurationsoption war nicht gültig" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Verbindungsfehler/fehlerhafte Anforderung an Authentifizierungsservice unter " "URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "Erstellte URL: %s" msgid "Container format is not specified." msgstr "Containerformat wurde nicht angegeben." 
msgid "Content-Type must be application/octet-stream" msgstr "Inhaltstyp muss Anwendungs-/Oktet-Stream sein" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "Fehlerhafter Abbild-Download für Abbild %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Keine Bindung an %(host)s:%(port)s möglich nach Versuch über 30 Sekunden" msgid "Could not find OVF file in OVA archive file." msgstr "Es wurde keine OVF-Datei in der OVA-Archivdatei gefunden. " #, python-format msgid "Could not find metadata object %s" msgstr "Metadatenobjekt %s konnte nicht gefunden werden" #, python-format msgid "Could not find metadata tag %s" msgstr "Metadatenschlagwort %s konnte nicht gefunden werden" #, python-format msgid "Could not find namespace %s" msgstr "Namensbereich %s konnte nicht gefunden werden" #, python-format msgid "Could not find property %s" msgstr "Eigenschaft %s konnte nicht gefunden werden" msgid "Could not find required configuration option" msgstr "Erforderliche Konfigurationsoption konnte nicht gefunden werden" #, python-format msgid "Could not find task %s" msgstr "Task %s konnte nicht gefunden werden" #, python-format msgid "Could not update image: %s" msgstr "Abbild konnte nicht aktualisiert werden: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "Zurzeit werden OVA-Pakete mit mehreren Platten nicht unterstützt. " msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "Benutzerdefinierte Liste der Validatoren sollte Tupel enthalten '(function, " "message)'" #, python-format msgid "Data for image_id not found: %s" msgstr "Daten für image_id nicht gefunden: %s" msgid "Data supplied was not valid." msgstr "Angegebene Daten waren nicht gültig." 
msgid "Date and time of image member creation" msgstr "Datum und Uhrzeit der Erstellung des Abbildelements" msgid "Date and time of image registration" msgstr "Datum und Uhrzeit der Abbildregistrierung " msgid "Date and time of last modification of image member" msgstr "Datum und Uhrzeit der letzten Änderung des Abbildelements" msgid "Date and time of namespace creation" msgstr "Datum und Uhrzeit der Erstellung des Namensbereichs" msgid "Date and time of object creation" msgstr "Datum und Uhrzeit der Objekterstellung" msgid "Date and time of resource type association" msgstr "Datum und Uhrzeit der Ressourcentypzuordnung" msgid "Date and time of tag creation" msgstr "Datum und Uhrzeit der Erstellung des Schlagwortes" msgid "Date and time of the last image modification" msgstr "Datum und Uhrzeit der letzten Abbildänderung" msgid "Date and time of the last namespace modification" msgstr "Datum und Uhrzeit der letzten Änderung des Namensbereichs" msgid "Date and time of the last object modification" msgstr "Datum und Uhrzeit der letzten Objektänderung" msgid "Date and time of the last resource type association modification" msgstr "Datum und Uhrzeit der letzten Änderung der Ressourcentypzuordnung" msgid "Date and time of the last tag modification" msgstr "Datum und Uhrzeit der letzten Schlagwortänderung" msgid "Datetime when this resource was created" msgstr "Datum/Uhrzeit der Erstellung dieser Ressource" msgid "Datetime when this resource was updated" msgstr "Datum/Uhrzeit der Aktualisierung dieser Ressource" msgid "Datetime when this resource would be subject to removal" msgstr "Datum/Uhrzeit, zu dem/der diese Ressource entfernt werden würde" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Standardwert für die Anzahl an Elementen, die von einer Anforderung " "zurückgegeben werden, wenn in der Anforderung keine explizite Angabe gemacht " "wird" msgid "Default value is invalid" msgstr 
"Standardwert ist ungültig" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "" "Versuch, das Artefakt hochzuladen, wird verweigert, weil es das Kontingent " "überschreitet: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Versuch, das Abbild hochzuladen, wird verweigert, weil es das Kontingent " "überschreitet: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "" "Versuch, Abbild hochzuladen, das größer ist als %d Bytes, wird nicht " "zugelassen." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "Abhängigkeitseigenschaft '%s' muss zuerst gelöscht werden" msgid "Dependency relations cannot be mutable" msgstr "Abhängigkeitsbeziehungen können nicht veränderbar sein" msgid "Deploy the v1 OpenStack Images API." msgstr "OpenStack Abbild-API Version 1 implementieren. " msgid "Deploy the v1 OpenStack Registry API." msgstr "OpenStack Registry-API Version 1 implementieren." msgid "Deploy the v2 OpenStack Images API." msgstr "OpenStack Abbild-API Version 2 implementieren. " msgid "Deploy the v2 OpenStack Registry API." msgstr "OpenStack-Registry-API Version 2 implementieren." msgid "Descriptive name for the image" msgstr "Beschreibender Name für das Abbild" msgid "Dictionary contains unexpected key(s)" msgstr "Verzeichnis enthält unerwartete Schlüssel" msgid "Dictionary size is greater than maximum" msgstr "Verzeichnisgröße überschreitet den Maximalwert" msgid "Dictionary size is less than minimum" msgstr "Verzeichnisgröße unterschreitet den Mindestwert" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." 
msgstr "" "Der Prüfsummenalgorithmus, der für die digitale Signatur verwendet wird. " "Verwenden Sie den Befehl \"openssl list-message-digest-algorithms\", um die " "von der auf der Plattform verwendeten OpenSSL-Version unterstützten, " "verfügbaren Algorithmen abzurufen. Beispiele: \"sha1\", \"sha256\", " "\"sha512\", usw." msgid "Disk format is not specified." msgstr "Plattenformat wurde nicht angegeben." msgid "Does not match pattern" msgstr "Entspricht nicht dem Muster" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Treiber %(driver_name)s konnte nicht ordnungsgemäß konfiguriert werden. " "Grund: %(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Entweder eine Datei oder eine legacy_image_id muss angegeben werden" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Fehler beim Entschlüsseln Ihrer Anforderung. Entweder die URL oder der " "angeforderte Body enthalten Zeichen, die von Glance nicht entschlüsselt " "werden konnten. " #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Fehler beim Abrufen der Mitglieder von Abbild %(image_id)s: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Fehler in Speicherkonfiguration. Hinzufügen von Artefakten zu Speicher ist " "inaktiviert." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Fehler in Speicherkonfiguration. Hinzufügen von Abbildern zu Speicher ist " "inaktiviert." msgid "Error occurred while creating the verifier" msgstr "Beim Erstellen der Prüffunktion ist ein Fehler aufgetreten." msgid "Error occurred while verifying the signature" msgstr "Beim Verifizieren der Signatur ist ein Fehler aufgetreten." 
msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "" "Mitglied mit Angabe im folgenden Format erwartet: {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "" "Status mit Angabe im folgenden Format erwartet: {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "Externe Quelle darf nicht leer sein." #, python-format msgid "External sources are not supported: '%s'" msgstr "Externe Quellen werden nicht unterstützt: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Abbild wurde nicht aktiviert. Fehler: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Abbildmetadaten wurden nicht hinzugefügt. Fehler: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Zu löschendes Artefakt %(artifact_id)s wurde nicht gefunden" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Zu löschendes Abbild %(image_id)s wurde nicht gefunden" #, python-format msgid "Failed to find image to delete: %s" msgstr "Zu löschendes Abbild wurde nicht gefunden: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Zu aktualisierendes Abbild wurde nicht gefunden: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Zu löschender Ressourcentyp %(resourcetype)s wurde nicht gefunden" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Die Image-Zwischenspeicherdatenbank wurde nicht initialisiert. Fehler: %s" #, python-format msgid "Failed to read %s from config" msgstr "Fehler beim Lesen von %s aus Konfiguration" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "Abbild wurde nicht reserviert. Fehler: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "Abbildmetadaten wurden nicht aktualisiert. 
Fehler: %s" #, python-format msgid "Failed to upload image %s" msgstr "Fehler beim Hochladen des Abbildes %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Fehler beim Hochladen von Abbilddaten für Abbild %(image_id)s wegen HTTP-" "Fehler: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Fehler beim Hochladen der Abbilddaten für das Abbild %(image_id)s auf Grund " "eines internen Fehlers: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "Datei %(path)s hat ungültige Sicherungsdatei %(bfile)s. Abbruch." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Dateibasierte Importe sind nicht zulässig. Verwenden Sie eine " "Imagedatenquelle, die nicht lokal ist." msgid "File too large" msgstr "Datei zu groß" msgid "File too small" msgstr "Datei zu klein" msgid "Forbidden image access" msgstr "Unzulässiger Zugriff auf Abbild" #, python-format msgid "Forbidden to delete a %s image." msgstr "Es ist nicht erlaubt, ein %s Abbild zu löschen." #, python-format msgid "Forbidden to delete image: %s" msgstr "Löschen von Abbild nicht erlaubt: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Es ist nicht erlaubt, '%(key)s' des %(status)s-Abbild zu ändern." #, python-format msgid "Forbidden to modify '%s' of image." msgstr "Ändern von '%s' eines Abbild nicht erlaubt." msgid "Forbidden to reserve image." msgstr "Reservieren von Abbild nicht erlaubt." msgid "Forbidden to update deleted image." msgstr "Aktualisieren von gelöschtem Abbild nicht erlaubt." 
#, python-format msgid "Forbidden to update image: %s" msgstr "Aktualisieren von Abbild nicht erlaubt: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Unerlaubter Uploadversuch: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Anforderung wird verboten, Metadatendefinitionsnamensbereich %s ist nicht " "sichtbar. " #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Anforderung wird nicht zugelassen, Task %s ist nicht sichtbar" msgid "Format of the container" msgstr "Format des Containers" msgid "Format of the disk" msgstr "Format der Festplatte" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "Abrufen der Daten von BLOB %(name)s fehlgeschlagen: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Abrufen der Daten des Abbildes %(id)s fehlgeschlagen: %(err)s." msgid "Glance client not installed" msgstr "Glance-Client ist nicht installiert." #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" ist nicht gültig." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host und Port \"%s\" ist nicht gültig." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Informationsnachricht in Klarschrift nur eingeschlossen, wenn zweckdienlich " "(in der Regel bei einem Fehler)" msgid "If False doesn't trace SQL requests." msgstr "Bei 'False' erfolgt keine Traceerstellung für SQL-Anforderungen." msgid "If False fully disable profiling feature." msgstr "Bei 'False' die Profilerstellungsfunktion vollständig deaktivieren." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. 
In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Bei 'False' gibt der Server den Header \"Connection: close\", bei 'True' den " "Wert \"Connection: Keep-Alive\" in seinen Antworten zurück. Um die Client-" "Socket-Verbindung explizit zu schließen, nachdem die Antwort gesendet und " "erfolgreich vom Client gelesen wurde, müssen Sie einfach diese Option auf " "'False' setzen, wenn Sie einen wsgi-Server erstellen. " msgid "If true, image will not be deletable." msgstr "Bei 'true' kann das Abbild nicht gelöscht werden." msgid "If true, namespace will not be deletable." msgstr "Bei 'true' kann der Namensbereich nicht gelöscht werden." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "Abbild %(id)s konnte nicht gelöscht werden, da es verwendet wird: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "Abbild %(id)s nicht gefunden" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Abbild %(image_id)s wurde nach dem Upload nicht gefunden. Das Abbild wurde " "möglicherweise während des Uploads gelöscht: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "Abbild %(image_id)s ist geschützt und kann nicht gelöscht werden." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "Abbild %s konnte nach dem Upload nicht gefunden werden. Das Abbild wurde " "möglicherweise beim Upload gelöscht. Die hochgeladenen Blöcke werden " "bereinigt." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." 
msgstr "" "Abbild %s konnte nach dem Hochladen nicht gefunden werden. Das Abbild ist " "möglicherweise beim Hochladen gelöscht worden." #, python-format msgid "Image %s is deactivated" msgstr "Abbild %s ist deaktiviert" #, python-format msgid "Image %s is not active" msgstr "Abbild %s ist nicht aktiv" #, python-format msgid "Image %s not found." msgstr "Abbild %s nicht gefunden." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "Das Abbild übersteigt das vorhandene Speicherkontingent: %s" msgid "Image id is required." msgstr "Abbild-ID ist erforderlich." msgid "Image is protected" msgstr "Abbild ist geschützt" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "Grenzwert für Abbildmitglieder für Abbild %(id)s überschritten: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "Abbildname zu lang: %d" msgid "Image operation conflicts" msgstr "Abbildoperationskonflikte" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Abbild-Statusänderung von %(cur_status)s nach %(new_status)s ist nicht " "erlaubt" #, python-format msgid "Image storage media is full: %s" msgstr "Datenträger zum Speichern des Abbildes ist voll: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Grenzwert für Abbildschlagwort für Abbild %(id)s überschritten: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "Problem beim Abbildupload: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "Abbild mit ID %s ist bereits vorhanden!" #, python-format msgid "Image with identifier %s has been deleted." msgstr "Abbild mit ID %s wurde gelöscht. 
" #, python-format msgid "Image with identifier %s not found" msgstr "Abbild mit ID %s nicht gefunden" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "Abbild mit der angegebenen ID %(image_id)s wurde nicht gefunden" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Falsche Authentifizierungsstrategie. Erwartet wurde \"%(expected)s\", " "empfangen wurde jedoch \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Falsche Anforderung: %s" msgid "Index is out of range" msgstr "Index liegt außerhalb des gültigen Bereichs" msgid "Index is required" msgstr "Index ist erforderlich" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "Eingabe enthält nicht das Feld '%(key)s' " #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "Unzureichende Berechtigungen für Artefaktspeichermedium: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Nicht ausreichende Berechtigungen auf Abbildspeichermedien: %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Ungültiger Inhaltstyp für die Verwendung mit %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Ungültiger JSON Zeiger für diese Ressource: : '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Ungültiges Zertifikatformat: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "Ungültige Kontrollsumme '%s': Darf 32 Zeichen nicht überschreiten" msgid "Invalid configuration in glance-swift conf file." msgstr "Ungültige Konfiguration in der Glance-Swift-Konfigurationsdatei." msgid "Invalid configuration in property protection file." msgstr "Ungültige Konfiguration in Eigenschaftsschutzdatei. " #, python-format msgid "Invalid container format '%s' for image." msgstr "Ungültiges Containerformat '%s' für Abbild." 
#, python-format msgid "Invalid content type %(content_type)s" msgstr "Ungültiger Inhaltstyp %(content_type)s" msgid "Invalid dict property type" msgstr "Ungültiger dict-Eigenschaftstyp" msgid "Invalid dict property type specification" msgstr "Ungültiger dict-Eigenschaftstyp angegeben" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Ungültiges Plattenformat '%s' für Abbild." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Ungültiger Filterwert %s. Das schließende Anführungszeichen fehlt." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Ungültiger Filterwert %s. Vor dem schließenden Anführungszeichen ist kein " "Komma." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Ungültiger Filterwert %s. Vor dem öffnenden Anführungszeichen ist kein Komma." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "Ungültige Header \"Content-Type\": %s" msgid "Invalid image id format" msgstr "Ungültiges Abbild-ID-Format" msgid "Invalid item type specification" msgstr "Ungültiger Elementtyp angegeben" #, python-format msgid "Invalid json body: %s" msgstr "Ungültiger JSON-Body: %s" msgid "Invalid jsonpatch request" msgstr "Ungültige jsonpatch-Anforderung" msgid "Invalid location" msgstr "Ungültige Position" #, python-format msgid "Invalid location %s" msgstr "Ungültige Position %s" #, python-format msgid "Invalid location: %s" msgstr "Ungültiger Ort: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Ungültige location_strategy Option: %(name)s. 
Gültige Optionen sind: " "%(strategies)s" msgid "Invalid locations" msgstr "Ungültige Positionen" #, python-format msgid "Invalid locations: %s" msgstr "Unbekannte Stellen: %s" msgid "Invalid marker format" msgstr "Ungültiges Markerformat" msgid "Invalid marker. Image could not be found." msgstr "Ungültiger Marker. Abbild konnte nicht gefunden werden." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "Ungültiger mask_gen_algorithm: %s" #, python-format msgid "Invalid membership association: %s" msgstr "Ungültige Mitgliedschaftszuordnung: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Ungültige Kombination von Platten- und Containerformaten. Beim Festlegen " "eines Platten- oder Containerformats auf 'aki', 'ari' oder 'ami' müssen die " "Container- und Plattenformate übereinstimmen." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Ungültige Operation: '%(op)s'. Es muss eine der folgenden Optionen verwendet " "werden: %(available)s." msgid "Invalid position for adding a location." msgstr "Ungültige Position zum Hinzufügen einer Position." msgid "Invalid position for removing a location." msgstr "Ungültige Stelle zum Entfernen einer Position." 
msgid "Invalid property definition" msgstr "Ungültige Eigenschaftsdefinition" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "Ungültige pss_salt_length: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Ungültiger öffentlicher Schlüsseltyp für den Signaturschlüsseltyp: %s" msgid "Invalid reference list specification" msgstr "Ungültige Referenzlistenspezifikation" msgid "Invalid referenced type" msgstr "Ungültiger referenzierter Typ" msgid "Invalid request PATCH for work with blob" msgstr "Ungültiger Anforderungspatch für die Verwendung mit blob" msgid "Invalid service catalog json." msgstr "Ungültige Servicekatalog-JSON." #, python-format msgid "Invalid signature hash method: %s" msgstr "Ungültige Signaturhashmethode: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Ungültiger Signaturschlüsseltyp: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Ungültige Sortierrichtung: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Ungültiger Sortierschlüssel: %(sort_key)s. Wenn die Typversion nicht " "festgelegt wurde, muss eine der folgenden Optionen verwendet werden: " "%(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Ungültiger Sortierschlüssel: %(sort_key)s. Es muss einer der folgenden sein: " "%(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Ungültiger Sortierschlüssel: %(sort_key)s. Sie können nicht nach dieser " "Eigenschaft sortieren" #, python-format msgid "Invalid status value: %s" msgstr "Ungültiger Statuswert: %s" #, python-format msgid "Invalid status: %s" msgstr "Ungültiger Status: %s" #, python-format msgid "Invalid time format for %s." msgstr "Ungültiges Zeitformat für %s." 
msgid "Invalid type definition" msgstr "Ungültige Typdefinition" #, python-format msgid "Invalid type value: %s" msgstr "Ungültiger Wert für Typ: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Ungültige Aktualisierung. Sie würde zu einer doppelten " "Metadatendefinitionseigenschaft mit demselben Namen wie %s führen" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie wurde zu einem doppelten " "Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " "%(namespace_name)s führen." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie wurde zu einem doppelten " "Metadatendefinitionsobjekt mit demselben Namen %(name)s im Namensbereich " "%(namespace_name)s führen." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Ungültige Aktualisierung. Sie würde zu einer doppelten " "Metadatendefinitionseigenschaft mit demselben Namen %(name)s im " "Namensbereich %(namespace_name)s führen. 
" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "Ungültiger Wert '%(value)s' für Parameter '%(param)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Ungültiger Wert für Option %(option)s: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Ungültiger Sichtbarkeitswert: %s" msgid "Is not allowed value" msgstr "Ist kein zulässiger Wert" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Das Eventlet-Modul scheint vor der Einstellung %s='yes' importiert worden zu " "sein. Im Moment ist die Inaktivierung von eventlet.greendns erforderlich, " "wenn IPv6 verwendet wird, da eventlet.greendns derzeit bei IPv6-Adressen " "unterbricht. Stellen Sie sicher, dass Eventlet erst importiert wird, wenn " "dies festgelegt wurde." msgid "It's invalid to provide multiple image sources." msgstr "Die Angabe von mehreren Abbildquellen ist ungültig." msgid "It's not allowed to add locations if locations are invisible." msgstr "" "Es ist nicht zulässig, Positionen hinzuzufügen, wenn die Positionen nicht " "sichtbar sind. " msgid "It's not allowed to remove locations if locations are invisible." msgstr "" "Es ist nicht zulässig, Positionen zu entfernen, wenn die Positionen nicht " "sichtbar sind. " msgid "It's not allowed to update locations if locations are invisible." msgstr "" "Es ist nicht zulässig, Positionen zu aktualisieren, wenn die Positionen " "nicht sichtbar sind. " msgid "Items have to be unique" msgstr "Elemente müssen eindeutig sein" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "" "Json-Pfad sollte mit einem '/' beginnen und ohne '/' enden. Zwei " "aufeinanderfolgende '/' sind nicht zulässig." msgid "Legacy image was not found" msgstr "Traditionelles Abbild nicht gefunden" msgid "Length is greater than maximum" msgstr "Die Länge überschreitet den Maximalwert" msgid "Length is less than minimum" msgstr "Die Länge unterschreitet den Mindestwert" msgid "Limit param must be an integer" msgstr "Grenzwertparameter muss eine ganze Zahl sein" msgid "Limit param must be positive" msgstr "Grenzwertparameter muss positiv sein" #, python-format msgid "Limit param must not be higher than %d" msgstr "Grenzwertparameter darf nicht größer sein als %d" msgid "Limits request ID length." msgstr "Begrenzt die Länge der Anforderungs-ID." msgid "List definitions may hot have defaults" msgstr "Listendefinitionen verfügen möglicherweise über keine Standardwerte" msgid "List of strings related to the image" msgstr "Liste mit dem Abbild zugehörigen Zeichenketten" msgid "List size is greater than maximum" msgstr "Listengröße überschreitet den Maximalwert" msgid "List size is less than minimum" msgstr "Listengröße unterschreitet den Mindestwert" msgid "Loop time between checking for new items to schedule for delete." msgstr "" "Schleifenzeit zwischen dem Überprüfen auf neue Elemente, deren Löschung zu " "planen ist. " #, python-format msgid "Malformed Content-Range header: %s" msgstr "Fehlerhafter Content-Range-Header: %s" msgid "Malformed JSON in request body." msgstr "Fehlerhafte JSON in Anforderungshauptteil." msgid "Max string length may not exceed 255 characters" msgstr "Die maximale Zeichenlänge darf 255 Zeichen nicht überschreiten" msgid "Maximal age is count of days since epoch." msgstr "Das maximale Alter entspricht der Anzahl von Tagen seit der Epoche." msgid "" "Maximum line size of message headers to be accepted. 
max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "Maximal zu akzeptierende Zeilenlänge für Nachrichtenheader. max_header_line " "muss möglicherweise erhöht werden, wenn größere Tokens verwendet werden (in " "der Regel diejenigen, die von der Keystone-v3-API mit großen " "Servicekatalogen generiert werden)" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Maximale Anzahl der Abbildmitglieder pro Abbild. Negative Werte bedeuten " "unbegrenzt." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Maximale Anzahl der für ein Abbild zulässigen Positionen. Unbegrenzt, wenn " "Werte negativ sind. " msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Maximale Anzahl der für ein Abbild zulässigen Eigenschaften. Unbegrenzt, " "wenn Werte negativ sind. " msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Maximale Anzahl der für ein Abbild zulässige Schlagwörter. Negative Werte " "bedeuten unbegrenzt." msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Maximal zulässige Anzahl an Elementen, die von einer Anforderung " "zurückgegeben werden könnten" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Das Maximum an Umleitungen (%(redirects)s) wurde überschritten." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "Maximale Größe des Abbildes, das ein Benutzer hochladen kann, in Bytes. Der " "Standardwert ist 1099511627776 Byte (1 TB). 
WARNUNG: Dieser Wert sollte nur " "nach sorgfältiger Überlegung erhöht werden und muss unter 8 EB " "(9223372036854775808) liegen." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Mitglied %(member_id)s ist für Abbild %(image_id)s doppelt vorhanden" msgid "Member can't be empty" msgstr "Mitglied darf nicht leer sein" msgid "Member to be added not specified" msgstr "Hinzuzufügendes Element nicht angegeben" msgid "Membership could not be found." msgstr "Mitgliedschaft konnte nicht gefunden werden." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "Der Metadatendefinitionsnamensbereich %(namespace)s ist geschützt und kann " "nicht gelöscht werden." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "Metadatendefinitionsnamensbereich für id=%s nicht gefunden" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "Der Metadatendefinitionsnamensbereich %(namespace_name)s wurde nicht " "gefunden. " #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "Das Metadatendefinitionsobjekt %(object_name)s ist geschützt und kann nicht " "gelöscht werden." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "Metadatendefinitionsobjekt für id=%s nicht gefunden" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "Die Metadatendefinitionseigenschaft %(property_name)s ist geschützt und kann " "nicht gelöscht werden. " #, python-format msgid "Metadata definition property not found for id=%s" msgstr "Metadatendefinitionseigenschaft für id=%s nicht gefunden" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." 
msgstr "" "Der Ressourcentyp %(resource_type_name)s der Metadatendefinition ist ein " "Basisdaten-Systemtyp und kann nicht gelöscht werden. " #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "Die Ressourcentypzuordnung %(resource_type)s der Metadatendefinition ist " "geschützt und kann nicht gelöscht werden." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "Der Metadatendefinitionstag %(tag_name)s ist geschützt und kann nicht " "gelöscht werden." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "Metadatendefinitionstag für id=%s nicht gefunden" msgid "Min string length may not be negative" msgstr "Die minimale Zeichenlänge darf keinen negativen Wert aufweisen" msgid "Minimal rows limit is 1." msgstr "Der Wert für die Mindestzeilenanzahl ist 1." #, python-format msgid "Missing required credential: %(required)s" msgstr "Erforderlicher Berechtigungsnachweis fehlt: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Mehrere 'image'-Serviceübereinstimmungen für Region %(region)s. Dies weist " "im Allgemeinen darauf hin, dass eine Region erforderlich ist und dass Sie " "keine angegeben haben." msgid "Must supply a positive, non-zero value for age." msgstr "Für age muss ein positiver Wert ungleich null angegeben werden." msgid "Name of the paste configuration file." msgstr "Name der einzufügenden Konfigurationsdatei." 
#, python-format msgid "No artifact found with ID %s" msgstr "Kein Artefakt mit ID %s gefunden" msgid "No authenticated user" msgstr "Kein authentifizierter Benutzer" #, python-format msgid "No image found with ID %s" msgstr "Es wurde kein Abbild mit der ID %s gefunden" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Keine Position mit ID %(loc)s von Abbild %(img)s gefunden" msgid "No permission to share that image" msgstr "Keine Berechtigung dieses Abbild freizugeben" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "Kein Plugin für '%(name)s' wurde geladen" msgid "No property to access" msgstr "Kein Zugriff auf Eigenschaft" #, python-format msgid "No such key '%s' in a dict" msgstr "Kein solcher Schlüssel '%s' in einem Wörterverzeichnis" #, python-format msgid "Not a blob property '%s'" msgstr "Keine BLOB-Eigenschaft '%s'" msgid "Not a downloadable entity" msgstr "Keine für den Download verfügbare Entität" msgid "Not a list property" msgstr "Keine Listeneigenschaft" #, python-format msgid "Not a list property '%s'" msgstr "Keine Listeneigenschaft '%s'" msgid "Not a valid value type" msgstr "Kein gültiger Werttyp" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Nicht alle Abhängigkeiten befinden sich im Status '%s'" #, python-format msgid "Not allowed to create members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu erstellen." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Deaktivieren des Abbild im Status '%s' nicht zulässig" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu löschen." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu löschen." #, python-format msgid "Not allowed to list members for image %s." 
msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s aufzulisten." #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Erneutes Aktivieren des Abbildes im Status '%s' nicht zulässig" #, python-format msgid "Not allowed to update members for image %s." msgstr "Es ist nicht zulässig, Mitglieder für Abbild %s zu aktualisieren." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Es ist nicht zulässig, Schlagwörter für Abbild %s zu aktualisieren." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Hochladen von Abbilddaten für Abbild %(image_id)s nicht zulässig: %(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "Kein Array-IDX '%s'" msgid "Number of sort dirs does not match the number of sort keys" msgstr "" "Die Anzahl der Sortierverzeichnisse entspricht nicht der Anzahl der " "Sortierschlüssel" msgid "OVA extract is limited to admin" msgstr "OVA-Extraktion kann nur vom Administrator ausgeführt werden." msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "Die für OVF relevanten Metadaten wurden nicht in der Konfigurationsdatei " "\"ovf-metadata.json\" angegeben. Setzen Sie \"cim_pasd\" auf eine Liste der " "relevanten CIM_ProcessorAllocationSettingData-Eigenschaften." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "Die OVF-Eigenschaftenkonfigurationsdatei \"ovf-metadata.json\" wurde nicht " "gefunden." msgid "Old and new sorting syntax cannot be combined" msgstr "Die alte und die neue Sortiersyntax können nicht kombiniert werden" msgid "Only list indexes are allowed for blob lists" msgstr "Für BLOB-Listen sind nur Listenindizes zulässig" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." 
msgstr "Operation \"%s\" erfordert ein Element mit der Bezeichnung \"value\"." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Operationsobjekte müssen genau ein Element mit der Bezeichnung \"add\", " "\"remove\" oder \"replace\" enthalten." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Operationsobjekte dürfen nur ein Element mit der Bezeichnung \"add\", " "\"remove\" oder \"replace\" enthalten." msgid "Operations must be JSON objects." msgstr "Operationen müssen JSON-Objekte sein." #, python-format msgid "Operator %(op)s is not supported" msgstr "Operator %(op)s wird nicht unterstützt" #, python-format msgid "Original locations is not empty: %s" msgstr "Originalpositionen sind nicht leer: %s" msgid "Owner can't be updated by non admin." msgstr "" "Eigner kann durch einen Benutzer, der kein Administrator ist, nicht " "aktualisiert werden." msgid "Owner must be specified to create a tag." msgstr "Der Eigentümer muss zum Erstellen eines Schlagwortes angegeben werden." msgid "Owner of the image" msgstr "Eigentümer des Abbildes" msgid "Owner of the namespace." msgstr "Eigentümer des Namensbereichs. " msgid "Param values can't contain 4 byte unicode." msgstr "Parameterwerte dürfen kein 4-Byte-Unicode enthalten." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Namensteil einer Pipeline in Ihrer Paste-Konfigurationsdatei mit entferntem " "Dienstenamen. 
Beispiel: Wenn Ihr Paste-Abschnittsname [pipeline:glance-api-" "keystone] lautet, verwenden Sie den Wert \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "Pfad zu dem Verzeichnis, in dem JSON-Metadatendateien gespeichert sind" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "Der Plug-in-Name '%(plugin)s' muss dem Typnamen des Artefakts '%(name)s' " "entsprechen" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Zeiger `%s` enthält \"~\", das nicht Teil einer erkannten Escapezeichenfolge " "ist." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Der Zeiger `%s` enthält ein angrenzendes \"/\"." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "Der Zeiger `%s` enthält kein gültiges Token." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Zeiger `%s` beginnt nicht mit \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "Der Zeiger `%s` endet mit einem \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "Der Zeiger enthält '~', das nicht Teil einer erkannten Escapezeichenfolge " "[~0, ~1] ist." #, python-format msgid "Port \"%s\" is not valid." msgstr "Port \"%s\" ist nicht gültig." msgid "Port the registry server is listening on." msgstr "Der Port, auf dem der Registrierungsserver hört." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "" "Numerische Komponente des Vorabrelease ist zu groß (maximal %d Zeichen)" msgid "Private key file to use when starting API server securely." msgstr "Private Schlüsseldatei zum sicheren Start des API-Servers." #, python-format msgid "Process %d not running" msgstr "Prozess %d wird nicht ausgeführt" #, python-format msgid "Properties %s must be set prior to saving data." 
msgstr "Eigenschaften %s müssen vor dem Speichern von Daten festgelegt werden." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "Eigenschaft %(property_name)s beginnt nicht mit dem erwarteten " "Zuordnungspräfix für Ressourcentypen '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Eigenschaft %s ist bereits vorhanden." #, python-format msgid "Property %s does not exist." msgstr "Eigenschaft %s ist nicht vorhanden." #, python-format msgid "Property %s may not be removed." msgstr "Eigenschaft %s darf nicht entfernt werden." #, python-format msgid "Property %s must be set prior to saving data." msgstr "Eigenschaft %s muss vor dem Speichern von Daten festgelegt werden." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "" "Eigenschaft '%(name)s' darf nicht den Wert '%(val)s' haben: " "%(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "Eigenschaft '%s' ist geschützt" msgid "Property names can't contain 4 byte unicode." msgstr "Eigenschaftsnamen dürfen kein 4-Byte-Unicode enthalten." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "Kein Eigenschaftsschutz für Operation %(operation)s für Regel %(rule)s " "gefunden. Zum Durchführen dieser Operation ist keine Rolle zulässig. " #, python-format msgid "Property's %(prop)s value has not been found" msgstr "Der Wert der Eigenschaft %(prop)s wurde nicht gefunden" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "Die angegebene Abbildgröße muss der gespeicherten Abbildgröße entsprechen. 
" "(angegebene Größe: %(ps)d, gespeicherte Größe: %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "Angegebenes Objekt passt nicht zu Schema '%(schema)s': %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Der angegebene Status der Task wird nicht unterstützt: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Der angegebene Typ der Task wird nicht unterstützt: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "" "Stellt eine benutzerfreundliche Beschreibung des Namensbereichs bereit. " msgid "Public images do not have members." msgstr "Öffentliche Abbilder haben keine Mitglieder." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "Öffentliche URL zur Verwendung für Versionsendpunkte. Standard ist 'None', " "wobei das Attribut 'host_url' der Anforderung zum Ausfüllen der URL-Basis " "verwendet wird. Wenn Glance hinter einem Proxy betrieben wird, möchten Sie " "dies möglicherweise so ändern, dass die URL des Proxy wiedergegeben wird." msgid "Python module path of data access API" msgstr "Python-Modulpfad von Datenzugriffs-API" msgid "Received invalid HTTP redirect." msgstr "Ungültige HTTP-Umleitung erhalten." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Umleitung auf %(uri)s für Autorisierung." #, python-format msgid "Registry service can't use %s" msgstr "Registrierungsdienst kann %s nicht verwenden" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "Registrierungsdatenbank wurde nicht ordnungsgemäß auf einem API-Server " "konfiguriert. 
Grund: %(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "Die Beziehung %(name)s darf nicht mehrere Werte haben" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Erneutes Laden von %(serv)s nicht unterstützt" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) erneut geladen" #, python-format msgid "Removing stale pid file %s" msgstr "Veraltete PID-Datei %s wird entfernt" msgid "Request body must be a JSON array of operation objects." msgstr "" "Anforderungshauptteil muss ein JSON-Array mit Operationsobjekten sein." msgid "Request must be a list of commands" msgstr "Die Anfrage muss eine Liste von Kommandos sein" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "Es fehlen erforderliche Abbildeigenschaften für die Signaturverifizierung. " "Die Signatur konnte nicht verifiziert werden. " #, python-format msgid "Required store %s is invalid" msgstr "Der verlangte Speicher %s ist ungültig" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "Ressourcentypennamen sollten möglichst immer an den Heat-Ressourcentypen " "ausgerichtet werden: http://docs.openstack.org/developer/heat/template_guide/" "openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Antwort von Keystone enthält keinen Glance-Endpunkt." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Die verwendete Rolle, um einen angemeldeten Benutzer als Administrator zu " "identifizieren." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. 
When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "Als Prozess mit langer Laufzeit ausführen. Wenn dies nicht angegeben ist " "(Standardwert), wird die scrub-Operation einmal ausgeführt und der Vorgang " "dann beendet. Ist dies angegeben, wird der Vorgang nicht beendet und scrub " "wird im Intervall 'wakeup_time' wie in der Konfiguration angegeben " "ausgeführt. " msgid "Scope of image accessibility" msgstr "Umfang der Abbildzugänglichkeit" msgid "Scope of namespace accessibility." msgstr "Umfang der Zugänglichkeit des Namensbereichs. " msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Geheimer Schlüssel, der zum Unterzeichnen von Tracenachrichten der Glance " "API- und Glance Registry-Dienste." #, python-format msgid "Server %(serv)s is stopped" msgstr "Server %(serv)s wurde gestoppt" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Erstellung von Server-Worker fehlgeschlagen: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Legt eine systemweite Limitierung für jeden Benutzer fest. Dieser Wert ist " "die Gesamtkapazität, die ein Benutzer in allen Speichersystemen " "zusammengenommen verwenden kann. Der Wert 0 bedeutet 'unbegrenzt'. Für den " "Wert kann eine optionale Einheit angegeben werden. Zulässige Einheiten sind " "B, KB, MB, GB und TB (diese stehen für Bytes, Kilobytes, Megabytes, " "Gigabytes bzw. Terabytes). Bei fehlender Einheitenangabe werden Bytes " "angenommen. 
Beachten Sie, dass zwischen dem Wert und der Einheit kein " "Leerzeichen stehen sollte und dass bei Einheiten die Groß-/Kleinschreibung " "beachtet werden muss." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "" "Das Anzeigen von Ebene %(shl)s wird bei dieser Operation nicht unterstützt" msgid "Signature verification failed" msgstr "Signaturverifizierung fehlgeschlagen" msgid "Signature verification failed." msgstr "Signaturverifizierung fehlgeschlagen." msgid "Size of image file in bytes" msgstr "Größe der Abbilddatei in Byte " msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Bei manchen Ressourcentypen sind mehrere Schlüssel/Wert-Paare pro Instanz " "zulässig. Cinder lässt z. B. Benutzer- und Abbildmetadaten für Datenträger " "zu. Nur die Metadaten der Imageeigenschaften werden von Nova ausgewertet " "(Planung oder Treiber). Diese Eigenschaft lässt zu, dass ein " "Namensbereichsziel die Mehrdeutigkeit entfernt. " msgid "Sort direction supplied was not valid." msgstr "Die angegebene Sortierrichtung war nicht gültig. " msgid "Sort key supplied was not valid." msgstr "Der angegebene Sortierschlüssel war nicht gültig. " msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Gibt das Präfix an, das für den angegebenen Ressourcentyp zu verwenden ist. " "Alle Eigenschaften im Namensbereich sollten dieses Präfix aufweisen, wenn " "sie auf den angegebenen Ressourcentyp angewendet werden. Muss " "Präfixtrennzeichen aufweisen (z. B. einen Doppelpunkt :)." 
msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "Gibt an, welches Tasksteuerprogramm zum Ausführen der Taskscripts zu " "verwenden ist. " msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "Status muss \"pending\", \"accepted\" oder \"rejected\" sein." msgid "Status not specified" msgstr "Status nicht angegeben" msgid "Status of the image" msgstr "Status des Abbildes" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "Der Statusübergang von %(cur_status)s zu %(new_status)s ist nicht zulässig" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "%(serv)s (PID %(pid)s) wird mit Signal (%(sig)s) gestoppt" #, python-format msgid "Store for image_id not found: %s" msgstr "Speicher für image_id nicht gefunden: %s" #, python-format msgid "Store for scheme %s not found" msgstr "Speicher für Schema %s nicht gefunden" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "Angaben für %(attr)s (%(supplied)s) und %(attr)s, die aus dem hochgeladenen " "Abbild (%(actual)s) generiert wurden, stimmten nicht überein. Abbildstatus " "wird auf 'killed' gesetzt." msgid "Supported values for the 'container_format' image attribute" msgstr "Unterstützte Werte für das 'container_format' Abbild-Attribut" msgid "Supported values for the 'disk_format' image attribute" msgstr "Unterstützte Werte für das Abbildattribut 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "Erneute Generierung wurde unterdrückt, da %(serv)s %(rsn)s war." msgid "System SIGHUP signal received." msgstr "System-SIGHUP-Signal empfangen. 
" #, python-format msgid "Task '%s' is required" msgstr "Task '%s' ist erforderlich" msgid "Task does not exist" msgstr "Task ist nicht vorhanden" msgid "Task failed due to Internal Error" msgstr "Task fehlgeschlagen. Grund: Interner Fehler" msgid "Task was not configured properly" msgstr "Die Task war nicht ordnungsgemäß konfiguriert" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "Die Task mit der angegebenen ID %(task_id)s wurde nicht gefunden" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Der Filter \"changes-since\" ist bei Version 2 nicht mehr verfügbar." #, python-format msgid "The CA file you specified %s does not exist" msgstr "" "Die von Ihnen angegebene Zertifizierungsstellendatei %s ist nicht vorhanden" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "Das Objekt von Abbild %(image_id)s, das von Task %(task_id)s erstellt wurde, " "befindet sich nicht mehr in einem gültigen Status zur weiteren Verarbeitung." msgid "The Store URI was malformed." msgstr "Die Speicher-URI war fehlerhaft." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "Die URL des Keystone-Service. Wenn \"use_user_token\" nicht wirksam ist und " "die Keystone-Authentifizierung verwendet wird, kann die Keystone-URL " "angegeben werden. " msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "" "Die Adresse, an der der Swift-Authentifizierungsservice empfangsbereit ist. " "(veraltet)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Das Administratorkennwort. Wenn \"use_user_token\" nicht wirksam ist, können " "Berechtigungsnachweise für den Administrator angegeben werden. 
" msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Der Administratorname. Wenn \"use_user_token\" nicht wirksam ist, können " "Berechtigungsnachweise für den Administrator angegeben werden. " msgid "The amount of time in seconds to delay before performing a delete." msgstr "Zeit in Sekunden bevor die Löschung durchgeführt wird." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "Der Zeitraum, in dem ein unvollständiges Abbild im Zwischenspeicher bleiben " "kann, bevor die Zwischenspeicherbereinigungsfunktion, falls sie aktiv ist, " "das unvollständige Abbild entfernt." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "" "Der Rückstandswert, der beim Erstellen des TCP-Listener-Socket verwendet " "wird." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Die von Ihnen angegebene Zertifizierungsdatei %s ist nicht vorhanden" msgid "The config file that has the swift account(s)configs." msgstr "Die Konfigurationsdatei mit den Swift-Konto-Konfigurationen. " msgid "The current status of this task" msgstr "Der aktuelle Status dieser Task" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "Das Gerät, auf dem sich das Abbild-Zwischenspeicherverzeichnis " "%(image_cache_dir)s befindet, unterstützt xattr nicht. Wahrscheinlich müssen " "Sie fstab bearbeiten und die Option user_xattr zur entsprechenden Zeile für " "das Gerät, auf dem sich das Zwischenspeicherverzeichnis befindet, hinzufügen." msgid "The driver to use for image cache management." 
msgstr "Der Treiber für die Abbild-Zwischenspeicherverwaltung." #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "" "Das Format für die Versionsangabe %s ist nicht gültig. Verwenden Sie die " "semver-Notation." msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "Das Format, in das Abbilder automatisch konvertiert werden sollen. Bei der " "Verwendung des RBD-Back-Ends sollte der Wert auf 'raw' gesetzt werden. " #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "Der angegebene URI ist ungültig. Geben Sie einen gültigen URI aus der " "folgenden Liste mit unterstützten URIs %(supported)s an." msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" "Hostname/IP des pydev-Prozesses, der für Fehlerbehebungsverbindungen " "empfangsbereit ist" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "Das Abbild %s ist bereits auf dem Slave vorhanden, aber bei der Überprüfung " "wurde es nicht gefunden. Dies bedeutet, dass wir nicht über die " "Berechtigungen zum Anzeigen aller Abbilder auf dem Slave-Server verfügen." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "Das eingehende Artefakt-BLOB ist zu groß: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "Das eingehende Abbild ist zu groß: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Die von Ihnen angegebene Schlüsseldatei %s ist nicht vorhanden" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildpositionen wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildmitgliedern wurde für dieses " "Abbild überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildeigenschaften wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildeigenschaften wurde " "überschritten. Versucht: %(num)s, Maximum: %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "Der Grenzwert für die zulässige Anzahl an Abbildschlagwörter wurde " "überschritten. Versucht: %(attempted)s, Maximum: %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "Die Position %(location)s ist bereits vorhanden" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Die Position weist eine ungültige ID auf: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." 
msgstr "" "Die Position der Eigenschaftsschutzdatei. Diese Datei enthält die Regeln für " "den Eigenschaftenschutz und die zugeordneten Regeln/Richtlinien. Wenn dieser " "Konfigurationswert nicht angegeben wird, wird der Eigenschaftenschutz " "standardmäßig nicht umgesetzt. Wenn ein Wert angegeben ist und die Datei " "nicht gefunden wird, wird der glance-api-Service nicht gestartet. " #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "Die Metadatendefinition %(record_type)s namens %(record_name)s wurde nicht " "gelöscht. Andere Datensätze verweisen noch darauf. " #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "Der Metadatendefinitionsnamensbereich %(namespace_name)s ist bereits " "vorhanden. " #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "Das Metadatendefinitionsobjekt namens %(object_name)s wurde in Namensbereich " "%(namespace_name)s nicht gefunden. " #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "Die Metadatendefinitionseigenschaft namens %(property_name)s wurde nicht in " "Namensbereich %(namespace_name)s gefunden. " #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " "%(resource_type_name)s und Namensbereich %(namespace_name)s ist bereits " "vorhanden." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." 
msgstr "" "Die Ressourcentypzuordnung der Metadatendefinition zwischen Ressourcentyp " "%(resource_type_name)s und Namensbereich %(namespace_name)s wurde nicht " "gefunden." #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Der Ressourcentyp %(resource_type_name)s der Metadatendefinition wurde nicht " "gefunden. " #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "Der Metadatendefinitionstag namens %(name)s wurde in Namensbereich " "%(namespace_name)s nicht gefunden." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "Der Modus, in dem die Engine ausgeführt wird. Kann 'serial' oder 'parallel' " "sein." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "Die Anzahl an untergeordneten Prozess-Workers, die zum Bearbeiten von " "Anforderungen erstellt werden. Der Standard entspricht der Anzahl an " "verfügbaren CPUs." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "Die Anzahl der parallelen Aktivitäten, die gleichzeitig von der Engine " "ausgeführt werden. Der Wert kann größer als eins sein, wenn der Enginemodus " "'parallel' ist." msgid "The parameters required by task, JSON blob" msgstr "Die für die Task erforderlichen Parameter, JSON-Blob-Objekt" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "Der Pfad zur Zertifikatsdatei für SSL-Verbindungen zum Registry-Server, " "falls vorhanden. 
Alternativ dazu können Sie die Umgebungsvariable " "GLANCE_CLIENT_CERT_FILE auf einen Dateipfad der Zertifizierungsdatei der " "Zertifizierungsstelle setzen" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "Der Pfad zur Zertifikatsdatei der Zertifizierungsstelle für SSL-Verbindungen " "zum Registry-Server, falls vorhanden. Alternativ dazu können Sie die " "Umgebungsvariable GLANCE_CLIENT_CA_FILE auf einen Dateipfad der " "Zertifizierungsdatei der Zertifizierungsstelle setzen. " msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "Der Pfad zur Schlüsseldatei für SSL-Verbindungen zum Registry- Server, falls " "vorhanden. Alternativ dazu können Sie die Umgebungsvariable " "GLANCE_CLIENT_KEY_FILE auf einen Dateipfad der Schlüsseldatei setzen" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "Pfad zur sqlite Datei-Datenbank, für das Management des Abbild-" "Zwischenspeichers." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "Die Zeitdauer in Sekunden, die der API-Server auf den Abschluss einer " "Registrierungsanfrage wartet. Ein Wert von 0 bedeutet keine Begrenzung." msgid "The port on which a pydev process is listening for connections." msgstr "" "Der Port, auf dem ein pydev-Prozess für Verbindungen empfangsbereit ist." msgid "The port on which the server will listen." msgstr "Der Port, auf dem der Server hört." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." 
msgstr "" "Das zu verwendende Protokoll für die Kommunikation mit dem " "Registrierungsserver. Entweder http oder https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" "Der angegebene Body %(body)s ist unter dem angegebenen Schema ungültig: " "%(schema)s" msgid "The provided image is too large." msgstr "Das angegebene Abbild ist zu groß." #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "Der angegebene Pfad '%(path)s' ist ungültig: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "Der Verweis auf die Standardparameter für den Konto-/Sicherungsspeicher, die " "zum Hinzufügen neuer Abbilder verwendet werden sollen." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "Die Region für den Authentifizierungsservice. Wenn \"use_user_token\" nicht " "wirksam ist und die Keystone-Authentifizierung verwendet wird, kann der " "Regionsname angegeben werden. " msgid "The request returned 500 Internal Server Error." msgstr "" "Die Anforderung hat eine Nachricht vom Typ '500 - interner Serverfehler' " "zurückgegeben." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "Die Anforderung hat eine Nachricht vom Typ '503 - Service nicht verfügbar' " "zurückgegeben. Dies geschieht im Allgemeinen bei einer Serviceüberbelastung " "oder einem anderen temporären Ausfall." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "Die Anforderung hat eine Nachricht vom Typ '302 - Mehrere Möglichkeiten' " "zurückgegeben. 
Dies weist im Allgemeinen darauf hin, dass Sie bei einem " "Anfrage-URI keinen Versionsindikator angegeben haben.\n" "\n" "Nachrichtentext der zurückgegebenen Antwort:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Die Anforderung hat eine Nachricht vom Typ '413 - Anforderungsentität zu " "groß' zurückgegeben. Dies weist im Allgemeinen darauf hin, dass die " "Geschwindigkeitsbegrenzung oder ein Kontingentschwellenwert überschritten " "wurde.\n" "\n" "Der Antworttext:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "Die Anforderung hat einen unerwarteten Status zurückgegeben: %(status)s.\n" "\n" "Der Antworttext:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "Das angeforderte Abbild wurde deaktiviert. Der Download von Abbilddaten ist " "nicht zulässig. " msgid "The result of current task, JSON blob" msgstr "Das Ergebnis der aktuellen Task, JSON-Blob-Objekt" msgid "The signature data was not properly encoded using base64" msgstr "Die Signaturdaten wurden nicht ordnungsgemäß mit base64 codiert." #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "Die Größe der Daten, mit denen %(image_size)s den Grenzwert überschreiten " "wird. %(remaining)s Byte verbleiben." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "Die Größe des zu verwendenden Thread-Pools für die Bereinigung von " "Abbildern. Der Standardwert eins bezeichnet die serielle Bereinigung. 
Jeder " "Wert über eins gibt die maximale Anzahl der Abbilder an, die parallel " "bereinigt werden können." #, python-format msgid "The specified member %s could not be found" msgstr "Das angegebene Mitglied %s konnte nicht gefunden werden" #, python-format msgid "The specified metadata object %s could not be found" msgstr "Das angegebene Metadatenobjekt %s konnte nicht gefunden werden" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "Das angegebene Metadatenschlagwort %s konnte nicht gefunden werden" #, python-format msgid "The specified namespace %s could not be found" msgstr "Der angegebene Namensbereich %s konnte nicht gefunden werden" #, python-format msgid "The specified property %s could not be found" msgstr "Die angegebene Eigenschaft %s konnte nicht gefunden werden" #, python-format msgid "The specified resource type %s could not be found " msgstr "Der angegebene Ressourcentyp %s konnte nicht gefunden werden" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "Der Status der Position des gelöschten Abbildes kann nur auf " "'pending_delete' oder auf 'deleted' gesetzt werden." msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "Der Status der Position des gelöschten Abbild kann nur auf 'pending_delete' " "oder auf 'deleted' gesetzt werden." msgid "The status of this image member" msgstr "Der Status dieses Abbildelements" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Die zum Abrufen der Speichervorgabenreihenfolge zu verwendenden " "Speichernamen. 
Der Name muss durch einen der Speicher registriert worden " "sein, die von der Konfigurationsoption 'stores' definiert werden. Diese " "Option wird angewendet, wenn Sie die Option 'store_type' als " "Imagepositionsstrategie verwenden, die von der Konfigurationsoption " "'location_strategy' definiert wird." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "Die für die Authentifizierung zu verwendende Strategie. Wenn \"use_user_token" "\" nicht wirksam ist, kann die Authentifizierungsstrategie angegeben werden. " #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "" "Das Zielmitglied %(member_id)s ist dem Abbild %(image_id)s bereits " "zugeordnet." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "Der Mandantenname des Benutzers mit Verwaltungsaufgaben. Wenn \"use_user_token" "\" nicht wirksam ist, kann der Administrator-Mandantenname angegeben werden. " msgid "The type of task represented by this content" msgstr "Der Typ der durch diesen Inhalt dargestellten Task" msgid "The unique namespace text." msgstr "Der eindeutige Text für den Namensbereich. " msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "Die Obergrenze (maximale Größe des kumulierten Zwischenspeichers in Byte), " "über der die Zwischenspeicherbereinigungsfunktion, falls sie aktiv ist, mit " "dem Bereinigen des Abbild-Zwischenspeicher beginnt. " msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Der benutzerfreundliche Name für den Namensbereich. Wird von der " "Benutzerschnittstelle verwendet, falls verfügbar. 
" msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "Der beim Swift-Authentifizierungsservice zu authentifizierende Benutzer " "(veraltet)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "Der Wert für die Socket-Option TCP_KEEPIDLE. Dies ist die Zeit in Sekunden, " "die die Verbindung inaktiv sein muss, bevor TCP beginnt, Keepalive-" "Stichproben zu senden. " #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "Es ist ein Problem bei %(error_key_name)s %(error_filename)s aufgetreten. " "Überprüfen Sie dies. Fehler: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Es ist ein Problem bei %(error_key_name)s %(error_filename)s aufgetreten. " "Überprüfen Sie dies. OpenSSL-Fehler: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Es gibt ein Problem mit Ihrem Schlüsselpaar. Überprüfen Sie, ob das " "Zertifikat %(cert_file)s und der Schlüssel %(key_file)s zusammengehören. " "OpenSSL-Fehler %(ce)s" msgid "There was an error configuring the client." msgstr "Fehler bei Konfiguration des Clients." msgid "There was an error connecting to a server" msgstr "Fehler beim Herstellen einer Verbindung zu einem Server." msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Dieser Konfigurationswert gibt an, ob \"roles\" oder \"policies\" in der " "Eigenschaftsschutzdatei verwendet wird. " msgid "" "This operation is currently not permitted on Glance Tasks. 
They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Diese Operation ist derzeit bei Glance-Tasks nicht zulässig. Sie " "werden bei Erreichen der in der Eigenschaft 'expires_at' festgelegten Zeit " "automatisch gelöscht." msgid "This operation is currently not permitted on Glance images details." msgstr "Diese Operation ist derzeit bei Glance-Abbilddetails nicht zulässig." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Dieser Wert bestimmt die Strategie, nach der die Reihenfolge der " "Speicherorte der Abbilder bestimmt wird. Gegenwärtig werden zwei Strategien " "mit Glance geliefert: 'location_order' und 'store_type'." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "" "Zeit in Stunden, für die eine Task anschließend aktiv bleibt, entweder bei " "Erfolg oder bei Fehlschlag" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Zeitlimit für Socketoperationen für Clientverbindungen. Wenn eine eingehende " "Verbindung für diese Anzahl an Sekunden inaktiv ist, wird sie geschlossen. " "Der Wert '0' bedeutet, dass für eine unbegrenzte Dauer gewartet wird." msgid "Too few arguments." msgstr "Zu wenige Argumente." msgid "Too few locations" msgstr "Zu wenige Positionen" msgid "Too many locations" msgstr "Zu viele Positionen" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "Gesamtgröße ist %(size)d Byte in %(img_count)d Images" msgid "Turn on/off delayed delete." msgstr "Verzögertes Löschen ein-/ausschalten." 
msgid "Type version has to be a valid semver string" msgstr "" "Bei der Typversion muss es sich um eine gültige semver-Zeichenkette handeln" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI kann nicht mehrere Vorkommen eines Schemas enthalten. Wenn Sie einen URI " "wie swift://user:pass@http://authurl.com/v1/container/obj angegeben haben, " "müssen Sie ihn ändern, um das swift+http://-Schema verwenden zu können. " "Beispiel: swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "URL für den Zugriff auf Abbilddatei in externem Speicher" msgid "Unable to PUT to non-empty collection" msgstr "PUT-Operation zum Einreihen in nicht leere Sammlung nicht möglich" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "PID-Datei %(pid)s kann nicht erstellt werden. Wird nicht als Root " "ausgeführt?\n" "Es wird auf eine temporäre Datei zurückgegriffen; Sie können den Dienst " "%(service)s stoppen mithilfe von:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Filtern mit dem unbekannten Operator '%s' nicht möglich." msgid "Unable to filter on a range with a non-numeric value." msgstr "Filtern in einem Bereich mit nicht numerischem Wert nicht möglich." msgid "Unable to filter on a unknown operator." msgstr "Filtern mit einem unbekannten Operator nicht möglich." msgid "Unable to filter using the specified operator." msgstr "Filtern mit dem angegebenen Operator nicht möglich." msgid "Unable to filter using the specified range." 
msgstr "Filtern mit dem angegebenen Bereich nicht möglich." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "'%s' kann in JSON-Schemaänderung nicht gefunden werden" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "'op' wurde in JSON-Schemaänderung nicht gefunden. Es muss eine der folgenden " "Optionen verwendet werden: %(available)s." msgid "Unable to get legacy image" msgstr "Traditionelles Abbild kann nicht abgerufen werden" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Grenzwert für Dateideskriptoren kann nicht erhöht werden. Wird nicht als " "Root ausgeführt?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "%(app_name)s kann nicht aus Konfigurationsdatei %(conf_file)s geladen " "werden.\n" "Abgerufen: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Schema kann nicht geladen werden: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "Konfigurationsdatei zum Einfügen für %s konnte nicht gefunden werden." 
msgid "Unable to modify collection in immutable or readonly property" msgstr "" "Die Sammlung in der nicht veränderbaren oder schreibgeschützten Eigenschaft " "kann nicht geändert werden" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "Das Zertifikat mit der folgenden ID konnte nicht abgerufen werden: %s" msgid "Unable to retrieve request id from context" msgstr "Anforderungs-ID kann nicht vom Kontext abgerufen werden" msgid "Unable to specify artifact type explicitly" msgstr "Artefakttyp kann nicht explizit angegeben werden" msgid "Unable to specify artifact type version explicitly" msgstr "Artefakttypversion kann nicht explizit angegeben werden" msgid "Unable to specify version if multiple types are possible" msgstr "Version kann nicht angegeben werden, wenn mehrere Typen möglich sind" msgid "Unable to specify version if type is not specified" msgstr "Version kann nicht angegeben werden, wenn Typ nicht angegeben wurde" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "Hochladen von doppelten Abbilddaten für Abbild %(image_id)s nicht möglich: " "%(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "" "Die Signatur kann nicht verifiziert werden, weil der Algorithmus auf diesem " "System nicht unterstützt wird." #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "Signatur kann nicht geprüft werden: %(reason)s" msgid "Unauthorized image access" msgstr "Unautorisierter Abbildzugriff" msgid "Unexpected body type. Expected list/dict." msgstr "Unerwarteter Hauptteiltyp. Erwartet wurde list/dict." 
#, python-format msgid "Unexpected response: %s" msgstr "Unerwartete Antwort: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Unbekannte Authentifizierungsstrategie '%s'" #, python-format msgid "Unknown command: %s" msgstr "Unbekanntes Kommando: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Unbekannte Sortierrichtung; muss 'desc' oder 'asc' sein" msgid "Unrecognized JSON Schema draft version" msgstr "Unerkannte JSON-Schemaentwurfsversion" msgid "Unrecognized changes-since value" msgstr "Unerkannter Wert für 'changes-since'" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "Nicht unterstützter Wert für 'sort_dir'. Zulässige Werte: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "Nicht unterstützter Wert für 'sort_key'. Zulässige Werte: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "" "Wert %(value)d außerhalb des gültigen Bereichs. Darf %(max)d nicht " "überschreiten" msgid "Value is greater than maximum" msgstr "Der Wert überschreitet den Maximalwert" msgid "Value is less than minimum" msgstr "Der Wert unterschreitet den Mindestwert" msgid "Value is required" msgstr "Wert ist erforderlich" #, python-format msgid "Version component is too large (%d max)" msgstr "Versionskomponente ist zu groß (maximal %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "Version ist ungültig: %(reason)s" msgid "Virtual size of image in bytes" msgstr "Virtuelle Größe des Abbildes in Byte" msgid "Visibility must be either \"public\" or \"private\"" msgstr "Sichtbarkeit muss entweder \"öffentlich\" oder \"privat\" sein" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Es wurde 15 Sekunden auf den Abbruch von PID %(pid)s (%(file)s) gewartet; " "Abbruch" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." 
msgstr "" "Bei 'false' können keine Artefakte geladen werden, unabhängig von " "available_plugins. Bei 'true' können Artefakte geladen werden." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Wenn der Server im SSL-Modus läuft, müssen Sie sowohl für die 'cert_file'- " "als auch für die 'key_file'-Option in Ihrer Konfigurationsdatei einen Wert " "angeben" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Diese Option, wenn gesetzt, macht den Eigentümer des Abbilds zum Mandanten. " "Andernfalls wird der angemeldete Benutzer, der die Anfrage stellt, zum " "Eigentümer." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "Beim Verwenden von SSL in Verbindungen zum Registry-Server, keine " "Überprüfung über eine Zertifizierungsstelle anfordern. Dies ist die Registry-" "Entsprechung für das Angeben von --insecure in der Befehlszeile unter " "Verwendung von glanceclient für die API. " msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Ob es Benutzern erlaubt sein soll, Abbildeigenschaften anzugeben, die über " "das hinausgehen, was vom Abbildschema angegeben wird" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." 
msgstr "" "Angabe, ob die Speicherpositionen von Backendabbildern in " "Abbildeigenschaften eingeschlosen werden sollen. Beispiel: Bei Verwendung " "des Dateisystemspeichers wird die URL \"file:///path/to/image\" an den " "Benutzer im Metadatenfeld 'direct_url' zurückgegeben. Das Sichtbarmachen der " "Speicherposition kann ein Sicherheitsrisiko darstellen. Verwenden Sie diese " "Einstellung daher mit Bedacht. Bei Angabe von 'true' wird die Option " "'show_image_direct_url' überschrieben." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "Ob die Speicherposition des Backendabbildes in Abbildeigenschaften " "eingeschlossen werden soll. Das Angeben der Speicherposition kann ein " "Sicherheitsrisiko darstellen. Verwenden Sie diese Einstellung also mit " "Bedacht!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Gibt an, ob Header weitergeleitet werden sollen, die Benutzer- und " "Nutzerinformationen enthalten, wenn Anforderungen an die " "Registrierungsdatenbank gestellt werden. Dies ermöglicht es der " "Registrierungsdatenbank, die Kontext-Middleware ohne die auth_token-" "Middleware der Keystone-Middleware zu verwenden, sodass Aufrufe an den " "Keystone-Authentifizierungsservice entfernt werden. Es wird empfohlen, bei " "Verwendung dieser Option eine sichere Kommunikation zwischen Glance-API und " "Glance-Registrierungsdatenbank mit anderen Mitteln als mit der auth_token-" "Middleware sicherzustellen." 
msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Gibt an, ob das Benutzertoken durchlaufen werden soll, wenn Anforderungen an " "die Registry gesendet werden. Um Fehler mit dem Ablauf des Tokens beim " "Hochladen von großen Dateien zu verhindern, wird empfohlen, diesen Parameter " "auf False festzulegen. Wenn \"use_user_token\" nicht wirksam ist, können " "Berechtigungsnachweise für den Administrator angegeben werden." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Arbeitsverzeichnis für asynchrone Verarbeitungsvorgänge. Das hier " "festgelegte Verzeichnis wird verwendet, um über Abbilder betrieben zu " "werden, in der Regel, bevor sie in den Zielspeicher importiert werden. 
" "Stellen Sie beim Angeben des Arbeitsverzeichnisses sicher, dass ausreichend " "Speicherplatz für die effiziente Ausführung gleichzeitiger Tasks vorhanden " "ist, ohne dass der Speicherplatz zu gering wird. Eine ungefähre Schätzung " "können Sie vornehmen, indem Sie die Anzahl von `max_workers` - oder N aktive " "Worker - mit der durchschnittlichen Abbildgröße(z. B. 500 MB) " "multiplizieren. Die Schätzung für die Abbildgröße sollte auf der " "durchschnittlichen Größe in Ihrer Implementierung basierend vorgenommen " "werden. Beachten Sie, dass Sie diese Anzahl abhängig von den aktiven " "Tasksmöglicherweise mit einem bestimmten Faktor multiplizieren müssen, je " "nachdem, wofür diese Task zuständig ist. Die verfügbare Größe sollte z. B. " "verdoppelt werden, wenn die Abbildkonvertierung aktiviert ist. Beachten Sie, " "dass es sich hierbei nur um Schätzungen handelt, die auf Grundlage des Worst-" "Case-Szenarios durchgeführt werden sollten und dass Sie darauf vorbereitet " "sein müssen, zu agieren, wenn die Schätzungen falsch waren. " #, python-format msgid "Wrong command structure: %s" msgstr "Falsche Kommandostruktur: %s" msgid "You are not authenticated." msgstr "Sie sind nicht authentifiziert." msgid "You are not authorized to complete this action." msgstr "Sie sind nicht dazu authorisiert, diese Aktion abzuschließen" #, python-format msgid "You are not authorized to lookup image %s." msgstr "Sie sind nicht berechtigt, Abbild %s zu suchen." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Sie sind nicht berechtigt, die Mitglieder des Abbild %s zu suchen." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "Sie können keine Schlagwörter in Namensbereichen erstellen, die '%s' gehören." msgid "You are not permitted to create image members for the image." msgstr "Sie können keine Abbildelemente für das Abbild erstellen." 
#, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Sie können keine Abbilder erstellen, die '%s' gehören." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "Sie können keine Namensbereiche erstellen, die '%s' gehören." #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "Sie können keine Objekte erstellen, die '%s' gehören." #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "Sie können keine Eigenschaften erstellen, die '%s' gehören." #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "Sie können keinen resource_type erstellen, der '%s' gehört." #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "Sie können diese Task nicht mit dem Eigentümer %s erstellen" msgid "You are not permitted to deactivate this image." msgstr "Sie können dieses Abbild nicht deaktivieren. " msgid "You are not permitted to delete this image." msgstr "Sie können dieses Abbild nicht löschen." msgid "You are not permitted to delete this meta_resource_type." msgstr "Sie können diesen meta_resource_type nicht löschen." msgid "You are not permitted to delete this namespace." msgstr "Sie können diesen Namensbereich nicht löschen." msgid "You are not permitted to delete this object." msgstr "Sie können dieses Objekt nicht löschen." msgid "You are not permitted to delete this property." msgstr "Sie können diese Eigenschaft nicht löschen." msgid "You are not permitted to delete this tag." msgstr "Sie können dieses Schlagwort nicht löschen." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "" "Sie haben keine Berechtigung, um '%(attr)s' für %(resource)s zu ändern." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Sie können '%s' bei diesem Abbild nicht ändern." 
msgid "You are not permitted to modify locations for this image." msgstr "Sie können Positionen für dieses Abbild nicht ändern." msgid "You are not permitted to modify tags on this image." msgstr "Sie können Schlagwörter bei diesem Abbild nicht ändern." msgid "You are not permitted to modify this image." msgstr "Sie können dieses Abbild nicht ändern." msgid "You are not permitted to reactivate this image." msgstr "Sie können dieses Abbild nicht erneut aktivieren. " msgid "You are not permitted to set status on this task." msgstr "Sie können den Status für diese Task nicht festlegen. " msgid "You are not permitted to update this namespace." msgstr "Sie können diesen Namensbereich nicht aktualisieren. " msgid "You are not permitted to update this object." msgstr "Sie können dieses Objekt nicht aktualisieren. " msgid "You are not permitted to update this property." msgstr "Sie können diese Eigenschaft nicht aktualisieren. " msgid "You are not permitted to update this tag." msgstr "Sie können dieses Schlagwort nicht aktualisieren." msgid "You are not permitted to upload data for this image." msgstr "Sie können keine Daten für dieses Abbild hochladen." 
#, python-format msgid "You cannot add image member for %s" msgstr "Hinzufügen von Abbildelement für %s nicht möglich" #, python-format msgid "You cannot delete image member for %s" msgstr "Löschen von Abbildelement für %s nicht möglich" #, python-format msgid "You cannot get image member for %s" msgstr "Abrufen von Abbildelement für %s nicht möglich" #, python-format msgid "You cannot update image member %s" msgstr "Aktualisieren von Abbildelement %s nicht möglich" msgid "You do not own this image" msgstr "Sie sind nicht Eigner dieses Images" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " "Sie haben ein Zertifikat angegeben. Allerdings haben Sie weder einen " "key_file-Parameter angegeben noch die GLANCE_CLIENT_KEY_FILE-" "Umgebungsvariable festgelegt" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Sie haben sich dafür entschieden, SSL für die Verbindung zu verwenden, und " "Sie haben einen Schlüssel angegeben. 
Allerdings haben Sie weder einen " "cert_file-Parameter angegeben noch die GLANCE_CLIENT_CERT_FILE-" "Umgebungsvariable festgelegt" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() hat unerwartetes Schlüsselwortargument '%s' erhalten" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "Übergang von %(current)s zu %(next)s in Aktualisierung nicht möglich " "(gewünscht ist from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "Benutzerdefinierte Eigenschaften (%(props)s) stehen im Konflikt mit " "Basiseigenschaften" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Hub weder für Eventlet 'poll' noch für 'selects' ist auf dieser Plattform " "verfügbar" msgid "is_public must be None, True, or False" msgstr "'is_public' muss 'None', 'True' oder 'False' sein" msgid "limit param must be an integer" msgstr "'limit'-Parameter muss eine Ganzzahl sein" msgid "limit param must be positive" msgstr "'limit'-Parameter muss positiv sein" #, python-format msgid "location: %s data lost" msgstr "Position: %s Daten verloren" msgid "md5 hash of image contents." msgstr "md5-Hashwert von Abbildinhalten. " #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() hat unerwartete Schlüsselwörter %s erhalten" msgid "protected must be True, or False" msgstr "'protected' muss 'True' oder 'False' sein" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s kann nicht gestartet werden. Fehler: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id ist zu lang. Max. 
Größe %s" glance-12.0.0/glance/locale/fr/0000775000567000056710000000000012701407204017260 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/fr/LC_MESSAGES/0000775000567000056710000000000012701407204021045 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/fr/LC_MESSAGES/glance.po0000664000567000056710000035225612701407051022653 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Arnaud Legendre , 2013 # Christophe kryskool , 2013 # EVEILLARD , 2013-2014 # Maxime COQUEREL , 2014 # EVEILLARD , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Martine Marin , 2016. #zanata # Tom Cocozzello , 2016. #zanata # leroy , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-28 06:26+0000\n" "Last-Translator: Martine Marin \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s doit être une chaîne" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s est obligatoire" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s peut ne pas être plus long que %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s peut ne pas être plus court que %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "" "%(attribute)s doit correspondre au modèle de correspondance %(pattern)s" #, 
python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "" "Une exception %(cls)s s'est produite dans le dernier appel d'une procédure " "distante : %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "%(m_id)s introuvable dans la liste des membres de l'image %(i_id)s." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) est en cours d'exécution..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s semble déjà en cours d'exécution : %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "" "%(strategy)s est enregistré deux fois comme module. %(module)s n'est pas " "utilisé." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_id)s de %(task_type)s ne sont pas configurés correctement. Impossible " "de charger le magasin de système de fichiers" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_id)s de %(task_type)s ne sont pas configurés correctement. Rép de " "travail manquant : %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(verb)s %(serv)s" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "Opération %(verb)s en cours sur %(serv)s avec %(conf)s" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s Veuillez indiquer une paire hôte:port, dans laquelle hôte est une adresse " "IPv4, une adresse IPv6, un nom d'hôte ou un nom de domaine complet. 
Si vous " "utilisez une adresse IPv6, faites-la figurer entre crochets de façon à la " "séparer du port (par ex., \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s ne peut pas contenir de caractère Unicode de 4 octets." #, python-format msgid "%s is already stopped" msgstr "%s est déjà stoppé" #, python-format msgid "%s is stopped" msgstr "%s est arrêté" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "La valeur %(param)s est hors bornes, elle ne doit pas dépasser %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "Option --os_auth_url ou variable d'environnement OS_AUTH_URL requise lorsque " "la stratégie d'authentification keystone est activée\n" msgid "A body is not expected with this request." msgstr "Un corps n'est pas attendu avec cette demande." msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "Liste d'artefacts autorisés sous la forme \"nom\" ou \"nom-version\". Une " "liste vide indique que n'importe quel artefact peut être chargé." #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Un objet de la définition de métadonnées avec le nom %(object_name)s existe " "déjà dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "Une propriété de la définition de métadonnées avec le nom %(property_name)s " "existe déjà dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." 
msgstr "" "Un type de ressource de la définition de métadonnées avec le nom " "%(resource_type_name)s existe déjà." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "Une balise de métadonnées nommée %(name)s existe déjà dans l'espace de nom " "%(namespace_name)s." msgid "A set of URLs to access the image file kept in external store" msgstr "" "Ensemble d'URL pour accéder au fichier image conservé dans le magasin externe" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "Clé AES pour chiffrer les métadonnées d'emplacement de magasin. Celle-ci " "comprend, le cas échéant, des données d'identification Swift ou S3. Elle " "doit être définie à une chaîne aléatoire de 16, 24 ou 32 octets" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" "Adresse de liaison du serveur. Utile lors de la sélection d'une interface " "réseau en particulier." msgid "Address to find the registry server." msgstr "Adresse pour trouver le serveur de registre." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "Autorisez les utilisateurs non authentifiés à accéder à l'API avec des " "privilèges en lecture seule. Ceci s'applique seulement à l'utilisation de " "ContextMiddleware." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "Les valeurs autorisées %s ne sont pas valides sous certains valideurs" msgid "Amount of disk space (in GB) required to boot image." msgstr "" "Quantité d'espace disque (en Go) requise pour l'image d'initialisation." msgid "Amount of ram (in MB) required to boot image." msgstr "Quantité de mémoire RAM (en Mo) requise pour l'image d'initialisation." 
msgid "An identifier for the image" msgstr "Identificateur de l'image" msgid "An identifier for the image member (tenantId)" msgstr "Identificateur pour le membre de l'image (tenantId)" msgid "An identifier for the owner of this task" msgstr "Un identificateur pour le propriétaire de cette tâche" msgid "An identifier for the task" msgstr "Un identificateur pour la tâche" msgid "An image file url" msgstr "URL d'un fichier image" msgid "An image schema url" msgstr "URL d'un schéma d'image" msgid "An image self url" msgstr "URL d'une image self" #, python-format msgid "An image with identifier %s already exists" msgstr "Une image avec l'identificateur %s existe déjà" msgid "An import task exception occurred" msgstr "Une exception liée à la tâche d'importation s'est produite" msgid "An object with the same identifier already exists." msgstr "Un objet avec le même identificateur existe déjà." msgid "An object with the same identifier is currently being operated on." msgstr "Un objet avec le même identificateur est déjà en cours d'utilisation." msgid "An object with the specified identifier was not found." msgstr "Un objet avec l'identificateur spécifié est introuvable." 
msgid "An unknown exception occurred" msgstr "Une exception inconnue s'est produite" msgid "An unknown task exception occurred" msgstr "Une exception de tâche inconnue s'est produite" #, python-format msgid "Array has no element at position %d" msgstr "La matrice n'a aucun élément à la position %d" msgid "Array property can't have item_type=Array" msgstr "La propriété de matrice ne peut pas avoir item_type=Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "L'artefact %s n'a pas pu être supprimé car il est utilisé : %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "L'artefact ne peut pas passer de l'état %(source)s à l'état %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "L'artefact dépasse le quota de stockage : %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "L'artefact n'a pas de propriété %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "" "Il n'est pas possible de changer l'état de l'artefact de %(curr)s à %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "Le support de stockage d'artefacts est saturé : %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" "Le type d'artefact ayant le nom %(name)s et la version %(version)s est " "inconnu" msgid "Artifact with a circular dependency can not be created" msgstr "Impossible de créer un artefact avec une dépendance en boucle" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "L'artefact avec id=%(id)s n'est pas accessible" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "L'artefact avec id=%(id)s est introuvable" msgid "Artifact with the specified type, name and version already exists" msgstr "L'artefact avec le type, le nom et la version indiqués existe déjà" #, python-format msgid "" 
"Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "L'artefact avec le type, le nom et la version indiqués a déjà la dépendance " "directedependency=%(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "L'artefact avec le type, le nom et la version indiqués a déjà la dépendance " "transitoiredependency=%(dep)s" msgid "Attempt to set readonly property" msgstr "Tentative de définition d'une propriété en lecture seule" msgid "Attempt to set value of immutable property" msgstr "Tentative de définition de la valeur d'une propriété non modifiable" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "Tentative de téléchargement d'image en double : %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "Vous avez tenté de mettre à jour la zone Emplacement pour une image qui n'a " "pas le statut en file d'attente." #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "L'attribut '%(property)s' est en lecture seule." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "L'attribut '%(property)s' est réservé." #, python-format msgid "Attribute '%s' is read-only." msgstr "L'attribut '%s' est en lecture seule." #, python-format msgid "Attribute '%s' is reserved." msgstr "Attribut '%s' est réservé." msgid "Attribute container_format can be only replaced for a queued image." msgstr "" "L'attribut container_format ne peut être remplacé que pour une image mise en " "file d'attente." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" "L'attribut disk_format ne peut être remplacé que pour une image mise en file " "d'attente." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. 
(deprecated)" msgstr "" "Clé d'authentification de l'utilisateur auprès du service d'authentification " "(obsolète)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "Service d'auth à l'URL %(url)s non trouvé." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "Erreur d'authentification - le jeton a peut-être expiré lors du " "téléchargement de fichier. Suppression des données d'image pour %s." msgid "Authorization failed." msgstr "Echec de l'autorisation." msgid "Available categories:" msgstr "Catégories disponibles :" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "Format de filtre de requête \"%s\" incorrect. Utilisez la notation de date " "et heure ISO 8601." #, python-format msgid "Bad Command: %s" msgstr "Commande %s erronée " #, python-format msgid "Bad header: %(header_name)s" msgstr "Erreur d’entête: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "Valeur incorrecte transmise pour filtrer %(filter)s, %(val)s obtenu" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "URI S3 incorrecte : %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Données d'identification incorrectes '%(creds)s' dans l'URI Swift" msgid "Badly formed credentials in Swift URI." msgstr "Données d'identification incorrectes dans l'URI Swift." msgid "Base directory that the image cache uses." msgstr "Répertoire de base utilisé par le cache d'image." msgid "BinaryObject property cannot be declared mutable" msgstr "La propriété BinaryObject ne peut pas être déclarée comme mutable" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "L'objet BLOB %(name)s peut n'avoir qu'une valeur" msgid "Blob size is not set" msgstr "La taille de l'objet BLOB n'est pas définie" msgid "Body expected in request." 
msgstr "Corps attendu dans la demande" msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "" "Le fichier et legacy_image_id peuvent ne pas être indiqués au même moment" msgid "CA certificate file to use to verify connecting clients." msgstr "" "Fichier de l'autorité de certification CA à utiliser pour vérifier les " "connexions clientes" msgid "Cannot be a negative value" msgstr "Ne peut pas être une valeur négative" msgid "Cannot be a negative value." msgstr "Ne peut pas être une valeur négative." #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "Impossible de convertir l'image %(key)s '%(value)s' en entier." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" "Impossible de déclarer la propriété d'artefact avec le nom réservé 'metadata'" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "Impossible de charger l'artefact %(name)s" msgid "Cannot remove last location in the image." msgstr "Impossible de supprimer le dernier emplacement dans l'image." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "" "Les données pour l'image %(image_id)s ne peuvent pas être sauvegardées : " "erreur %(error)s" msgid "Cannot set locations to empty list." msgstr "Impossible de définir des emplacements avec une liste vide." msgid "Cannot specify 'max_size' explicitly" msgstr "Impossible d'indiquer max_size de façon explicite" msgid "Cannot specify 'min_size' explicitly" msgstr "Impossible d'indiquer min_size de façon explicite" msgid "Cannot upload to an unqueued image" msgstr "Téléchargement impossible dans une image non placée en file d'attente" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "Impossible d'utiliser ce paramètre avec l'opérateur %(op)s" msgid "Certificate file to use when starting API server securely." 
msgstr "" "Fichier certificat à utiliser lors du démarrage du serveur d'API en toute " "sécurité." #, python-format msgid "Certificate format not supported: %s" msgstr "Format de certificat non pris en charge : %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "Certificat non valide après : %s UTC" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "Certificat non valide avant : %s UTC" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "" "Echec de vérification du total de contrôle. Mise en cache de l'image '%s' " "annulée." msgid "Client disconnected before sending all data to backend" msgstr "Client déconnecté avant l'envoi de toutes les données au backend" msgid "Command not found" msgstr "La commande n'a pas été trouvée" msgid "Configuration option was not valid" msgstr "L'option de configuration n'était pas valide" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" "Erreur de connexion/demande erronée pour le service d'auth à l'URL %(url)s." #, python-format msgid "Constructed URL: %s" msgstr "URL construite : %s" msgid "Container format is not specified." msgstr "Le format de conteneur n'a pas été spécifié." msgid "Content-Type must be application/octet-stream" msgstr "Le type de contenu doit être application/octet-stream" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "téléchargement d'image endommagée pour l'image %(image_id)s" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" "Liaison impossible à %(host)s:%(port)s après une tentative de 30 secondes" msgid "Could not find OVF file in OVA archive file." msgstr "Fichier OVF introuvable dans le fichier archive OVA." 
#, python-format msgid "Could not find metadata object %s" msgstr "L'objet métadonnées %s est introuvable" #, python-format msgid "Could not find metadata tag %s" msgstr "Balise de métadonnées %s introuvable" #, python-format msgid "Could not find namespace %s" msgstr "Espace de nom %s introuvable" #, python-format msgid "Could not find property %s" msgstr "Propriété %s introuvable" msgid "Could not find required configuration option" msgstr "Option de configuration obligatoire introuvable" #, python-format msgid "Could not find task %s" msgstr "La tâche %s est introuvable" #, python-format msgid "Could not update image: %s" msgstr "Impossible de mettre à jour l'image : %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" "Actuellement, les packages OVA contenant plusieurs disques ne sont pas pris " "en charge." msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "La liste des valideurs personnalisés doit contenir des uplets '(fonction, " "message)'" #, python-format msgid "Data for image_id not found: %s" msgstr "Données d'image_id introuvables : %s" msgid "Data supplied was not valid." msgstr "Les données fournies n'étaient pas valides." 
msgid "Date and time of image member creation" msgstr "Date et heure de création du membre de l'image" msgid "Date and time of image registration" msgstr "Date et heure d'enregistrement de l'image" msgid "Date and time of last modification of image member" msgstr "Date et heure de dernière modification du membre de l'image" msgid "Date and time of namespace creation" msgstr "Date et heure de création de l'espace de nom" msgid "Date and time of object creation" msgstr "Date et heure de création de l'objet" msgid "Date and time of resource type association" msgstr "Date et heure d'association de type de ressource" msgid "Date and time of tag creation" msgstr "Date et heure de création de la balise" msgid "Date and time of the last image modification" msgstr "Date et heure de dernière modification de l'image" msgid "Date and time of the last namespace modification" msgstr "Date et heure de dernière modification de l'espace de nom" msgid "Date and time of the last object modification" msgstr "Date et heure de dernière modification de l'objet" msgid "Date and time of the last resource type association modification" msgstr "" "Date et heure de dernière modification d'association de type de ressource " msgid "Date and time of the last tag modification" msgstr "Date et heure de dernière modification de la balise " msgid "Datetime when this resource was created" msgstr "Date-heure à laquelle cette ressource a été créée" msgid "Datetime when this resource was updated" msgstr "Date-heure à laquelle cette ressource a été mise à jour" msgid "Datetime when this resource would be subject to removal" msgstr "Date-heure à laquelle cette ressource serait soumise à une suppression" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "Valeur par défaut pour le nombre d'éléments renvoyés par une demande, si " "celle-ci n'est pas explicitement définie dans la demande" msgid "Default value is invalid" msgstr "La 
valeur par défaut n'est pas valide" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "" "Blocage de la tentative de téléchargement de l'artefact vu qu'il entraîne un " "dépassement du quota : %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" "Refus de la tentative de téléchargement d'une image qui dépasse le quota : %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "" "Refus de la tentative de téléchargement d'une image dont la taille est " "supérieure à %d octets." #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "La propriété de dépendance '%s' doit être supprimée en premier. " msgid "Dependency relations cannot be mutable" msgstr "Les relations des dépendances ne peuvent pas être modifiables" msgid "Deploy the v1 OpenStack Images API." msgstr "Déploiement de l'API v1 OpenStack Images." msgid "Deploy the v1 OpenStack Registry API." msgstr "Déployez l'API du registre OpenStack V1." msgid "Deploy the v2 OpenStack Images API." msgstr "Déploiement de l'API v2 OpenStack Images." msgid "Deploy the v2 OpenStack Registry API." msgstr "Déployez l'API du registre OpenStack V2." msgid "Descriptive name for the image" msgstr "Nom descriptif de l'image" msgid "Dictionary contains unexpected key(s)" msgstr "Le dictionnaire contient une ou plusieurs clés inconnues" msgid "Dictionary size is greater than maximum" msgstr "La taille du dictionnaire a une valeur supérieure à la valeur maximale" msgid "Dictionary size is less than minimum" msgstr "La taille du dictionnaire a une valeur inférieure à la valeur minimale" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." 
msgstr "" "Algorithme de chiffrement qui sera utilisé pour la signature numérique. " "Utilisez la commande \"openssl list-message-digest-algorithms\" pour obtenir " "les algorithmes disponibles pris en charge par la version d'OpenSSL sur la " "plateforme. Exemples : \"sha1\", \"sha256\", \"sha512\", etc." msgid "Disk format is not specified." msgstr "Le format de disque n'a pas été spécifié." msgid "Does not match pattern" msgstr "Ne correspond pas au modèle de correspondance" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "Impossible de configurer le pilote %(driver_name)s correctement. Cause : " "%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "Un fichier ou un legacy_image_id doit être indiqué" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "Erreur lors du décodage de votre demande. L'URL ou le corps de la demande " "contiennent des caractères que Glance ne peut pas décoder" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" "Erreur lors de l'extraction des membres de l'image %(image_id)s : " "%(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "" "Erreur dans la configuration du magasin. L'ajout d'artefacts au magasin est " "désactivé." msgid "Error in store configuration. Adding images to store is disabled." msgstr "" "Erreur de configuration du magasin. L'ajout d'images au magasin est " "désactivé." 
msgid "Error occurred while creating the verifier" msgstr "Une erreur s'est produite lors de la création du vérificateur" msgid "Error occurred while verifying the signature" msgstr "Une erreur s'est produite lors de la vérification de la signature" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "Membre attendu sous la forme : {\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "Statut attendu sous la forme : {\"status\": \"status\"}" msgid "External source should not be empty" msgstr "La source externe ne doit pas être vide" #, python-format msgid "External sources are not supported: '%s'" msgstr "Sources externes non prises en charge : '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "Echec de l'activation de l'image. Erreur obtenue : %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "Impossible d'ajouter les métadonnées d'image. Erreur obtenue : %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "Impossible de trouver l'artefact %(artifact_id)s à supprimer" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "Échec pour trouver image %(image_id)s à supprimer." #, python-format msgid "Failed to find image to delete: %s" msgstr "Échec pour trouver l'image à supprimer: %s" #, python-format msgid "Failed to find image to update: %s" msgstr "Echec pour trouver l'image à mettre à jour: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "Echec pour trouver le type de ressource %(resourcetype)s a supprimer" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" "Impossible d'initialiser la base de données de caches d'image. 
Erreur " "obtenue : %s" #, python-format msgid "Failed to read %s from config" msgstr "Echec de la lecture de %s à partir de la config" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "Impossible de réserver l'image. Erreur obtenue : %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "" "Impossible de mettre à jour les métadonnées d'image. Erreur obtenue : %s" #, python-format msgid "Failed to upload image %s" msgstr "Impossible de charger l'image %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "Echec de téléchargement des données image pour l'image %(image_id)s en " "raison d'une erreur HTTP : %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "Echec de téléchargement des données image pour l'image %(image_id)s en " "raison d'une erreur interne : %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "Le fichier %(path)s dispose d'un fichier de sauvegarde non valide : " "%(bfile)s. L'opération est abandonnée." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "Les importations à partir de fichiers sont interdites. Utilisez une source " "externe de données image." msgid "File too large" msgstr "Fichier trop volumineux" msgid "File too small" msgstr "Fichier trop petit" msgid "Forbidden image access" msgstr "Accès interdit à l'image" #, python-format msgid "Forbidden to delete a %s image." msgstr "Interdiction de supprimer une image %s" #, python-format msgid "Forbidden to delete image: %s" msgstr "Interdiction de supprimer l'image: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "Interdiction de modifier '%(key)s' de l'image %(status)s." #, python-format msgid "Forbidden to modify '%s' of image." 
msgstr "Interdiction de modifier l'élément '%s' de l'image." msgid "Forbidden to reserve image." msgstr "Interdiction de réserver une image." msgid "Forbidden to update deleted image." msgstr "Interdiction de mettre à jour l'image supprimée." #, python-format msgid "Forbidden to update image: %s" msgstr "Interdiction de mise à jour de l'image: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "Tentative de téléchargement interdite : %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" "Interdiction de la demande, l'espace de nom %s de la définition de " "métadonnées n'est pas visible." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "Interdiction de la demande, la tâche %s n'est pas visible" msgid "Format of the container" msgstr "Format du conteneur" msgid "Format of the disk" msgstr "Format du disque" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "Echec de l'obtention des données Blob %(name)s : %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "Echec de l'obtention des données de l'image %(id)s : %(err)s." msgid "Glance client not installed" msgstr "Le client Glance n'est pas installé" #, python-format msgid "Host \"%s\" is not valid." msgstr "Host \"%s\" n'est pas valide." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "Host et port \"%s\" ne sont pas valides." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "Message d'information lisible par l'homme inclus uniquement si approprié " "(habituellement en cas d'incident)" msgid "If False doesn't trace SQL requests." msgstr "Si False, ne pas tracer les demandes." msgid "If False fully disable profiling feature." msgstr "Si False, désactiver la fonction de profilage." 
msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "Si l'option est False, le serveur renverra l'en-tête \"Connection: close\", " "si l'option est True, le serveur renverra \"Connection: Keep-Alive\" dans " "ses réponses. Pour fermer la connexion du socket client explicitement une " "fois la réponse envoyée et lue correctement par le client, il vous suffit de " "définir cette option sur False lorsque vous créez un serveur wsgi." msgid "If true, image will not be deletable." msgstr "Si true, l'image ne pourra pas être supprimée." msgid "If true, namespace will not be deletable." msgstr "Si true, l'espace de nom ne pourra pas être supprimé." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" "L'image %(id)s n'a pas pu être supprimée car elle est utilisée : %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "Image %(id)s non trouvé" #, python-format msgid "" "Image %(image_id)s could not be found after upload. The image may have been " "deleted during the upload: %(error)s" msgstr "" "Image %(image_id)s introuvable après le téléchargement. Elle a sans doute " "été supprimée au cours du téléchargement : %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "L'image %(image_id)s est protégée et ne peut pas être supprimée." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "L'image %s n'a pas été trouvée après le téléchargement. Elle a sans doute " "été supprimée pendant le téléchargement. Nettoyage des blocs téléchargés." 
#, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "" "L'image %s est introuvable après le chargement. L'image a peut-être été " "supprimée lors du chargement." #, python-format msgid "Image %s is deactivated" msgstr "L'image %s est désactivée" #, python-format msgid "Image %s is not active" msgstr "L'image %s n'est pas active" #, python-format msgid "Image %s not found." msgstr "Image %s introuvable." #, python-format msgid "Image exceeds the storage quota: %s" msgstr "l'image %s dépasse le quota de stockage" msgid "Image id is required." msgstr "Id image est requis." msgid "Image is protected" msgstr "L'image est protégée" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "Le nombre maximal de membres est dépassé pour l'image %(id)s : %(e)s :" #, python-format msgid "Image name too long: %d" msgstr "Nom de l'image trop long : %d" msgid "Image operation conflicts" msgstr "Conflits d'opération d'image" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "La transition du statut de l'image de %(cur_status)s vers %(new_status)s " "n'est pas autorisée" #, python-format msgid "Image storage media is full: %s" msgstr "Le support de stockage d'image est saturé : %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "Le nombre maximal de balises est dépassé pour l'image %(id)s : %(e)s :" #, python-format msgid "Image upload problem: %s" msgstr "Problème d'envoi de l'image: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "L'image avec l'identificateur %s existe déjà !" #, python-format msgid "Image with identifier %s has been deleted." msgstr "L'image avec l'identificateur %s a été supprimée." 
#, python-format msgid "Image with identifier %s not found" msgstr "L'image portant l'ID %s est introuvable" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "L'image avec l'ID %(image_id)s indiqué est introuvable. " #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "Stratégie d'autorisation incorrecte, valeur attendue \"%(expected)s\" mais " "valeur obtenue \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "Requête incorrecte: %s" msgid "Index is out of range" msgstr "Index hors bornes" msgid "Index is required" msgstr "L'index est requis " #, python-format msgid "Input does not contain '%(key)s' field" msgstr "L'entrée ne contient pas la zone '%(key)s'" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "" "Droits d'accès insuffisants sur le support de stockage des artefacts : %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "Droits insuffisants sur le support de stockage d'image : %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "Type de contenu non valide pour fonctionner avec %s" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "Pointeur JSON invalide pour cette ressource : '%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "Format de certificat non valide : %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "" "Total de contrôle '%s' non valide : il ne doit pas comporter plus de 32 " "caractères. " msgid "Invalid configuration in glance-swift conf file." msgstr "" "Configuration non valide dans le fichier de configuration glance-swift." msgid "Invalid configuration in property protection file." msgstr "" "Configuration non valide dans le fichier de verrouillage de propriétés." #, python-format msgid "Invalid container format '%s' for image." 
msgstr "Format de conteneur '%s' non valide pour l'image." #, python-format msgid "Invalid content type %(content_type)s" msgstr "Type de contenu non valide %(content_type)s" msgid "Invalid dict property type" msgstr "Type de propriété de dictionnaire non valide" msgid "Invalid dict property type specification" msgstr "Spécification de type de propriété de dictionnaire non valide" #, python-format msgid "Invalid disk format '%s' for image." msgstr "Format de disque '%s' non valide pour l'image." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "Valeur de filtre %s non valide. Les guillemets ne sont pas fermés." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "" "Valeur de filtre %s non valide. Il n'y a pas de virgule après la fermeture " "des guillemets." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" "Valeur de filtre %s non valide. Il n'y a pas de virgule avant l'ouverture " "des guillemets." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "En-têtes \"Content-Type\" non valides : %s" msgid "Invalid image id format" msgstr "Format d'ID image non valide" msgid "Invalid item type specification" msgstr "Spécification de type d'élément non valide" #, python-format msgid "Invalid json body: %s" msgstr "Corps json non valide : %s" msgid "Invalid jsonpatch request" msgstr "Requête jsonpatch non valide" msgid "Invalid location" msgstr "Emplacement non valide" #, python-format msgid "Invalid location %s" msgstr "Emplacement non valide : %s" #, python-format msgid "Invalid location: %s" msgstr "Emplacement non valide : %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "Option location_strategy non valide : %(name)s. 
La ou les options de " "stratégie valides sont : %(strategies)s" msgid "Invalid locations" msgstr "Emplacements non valides" #, python-format msgid "Invalid locations: %s" msgstr "Emplacements non valides : %s" msgid "Invalid marker format" msgstr "Format de marqueur non valide" msgid "Invalid marker. Image could not be found." msgstr "Marqueur non valide. Image introuvable." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "Algorithme mask_gen_algorithm non valide : %s" #, python-format msgid "Invalid membership association: %s" msgstr "Association d'appartenance non valide : %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "Combinaison non valide de formats de disque et de conteneur. Si vous " "définissez un disque ou un conteneur au format 'aki', 'ari' ou 'ami', les " "formats du disque et du conteneur doivent correspondre." #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "" "Opération non valide : `%(op)s`. Doit être l'une des suivantes : " "%(available)s." msgid "Invalid position for adding a location." msgstr "Position non valide pour l'ajout d'un emplacement." msgid "Invalid position for removing a location." msgstr "Position non valide pour la suppression d'un emplacement." 
msgid "Invalid property definition" msgstr "Définition de propriété non valide" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "Propriété pss_salt_length non valide : %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "Type de clé publique non valide pour le type de clé de signature : %s" msgid "Invalid reference list specification" msgstr "Spécification de liste de référence non valide" msgid "Invalid referenced type" msgstr "Type référencé non valide" msgid "Invalid request PATCH for work with blob" msgstr "Requête PATCH non valide pour fonctionner avec blob" msgid "Invalid service catalog json." msgstr "json de catalogue de service non valide." #, python-format msgid "Invalid signature hash method: %s" msgstr "Méthode de hachage de la signature non valide : %s" #, python-format msgid "Invalid signature key type: %s" msgstr "Type de clé de signature non valide : %s" #, python-format msgid "Invalid sort direction: %s" msgstr "Sens de tri non valide : %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "Clé de tri non valide : %(sort_key)s. Si la version du type n'est pas " "définie, elle doit être l'une des suivantes : %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "Clé de tri non valide : %(sort_key)s. Doit être l'une des valeurs " "suivantes : %(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" "Clé de tri non valide : %(sort_key)s. Impossible d'effectuer un tri d'après " "cette propriété. " #, python-format msgid "Invalid status value: %s" msgstr "Valeur de statut non valide : %s" #, python-format msgid "Invalid status: %s" msgstr "Statut non valide : %s" #, python-format msgid "Invalid time format for %s." msgstr "Format d'heure non valide pour %s." 
msgid "Invalid type definition" msgstr "Définition de type non valide" #, python-format msgid "Invalid type value: %s" msgstr "Type de valeur non valide: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "Mise à jour non valide. Elle créerait une de définition de métadonnées en " "double avec le nom %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait un objet de définition de métadonnées " "en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait un objet de définition de métadonnées " "en double avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "Mise à jour non valide. Elle créerait une propriété de définition de " "métadonnées avec le nom %(name)s dans l'espace de nom %(namespace_name)s." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" "Valeur non valide '%(value)s' pour le paramètre '%(param)s' : %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "Valeur non valide pour l'option %(option)s : %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "Valeur de visibilité non valide : %s" msgid "Is not allowed value" msgstr "N'est pas une valeur autorisée" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. 
It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "Vous avez importé le module Eventlet avant de définir le paramètre %s='yes'. " "Actuellement, vous devez désactiver eventlet.greendns si vous utilisez ipv6 " "car eventlet.greendns s'interrompt avec les adresses ipv6. Veillez à ne pas " "importer eventlet avant cette définition." msgid "It's invalid to provide multiple image sources." msgstr "Il est invalide de fournir plusieurs sources d'image" msgid "It's not allowed to add locations if locations are invisible." msgstr "" "L'ajout des emplacements n'est pas autorisé si les emplacements sont " "invisibles." msgid "It's not allowed to remove locations if locations are invisible." msgstr "" "La suppression des emplacements n'est pas autorisée si les emplacements sont " "invisibles." msgid "It's not allowed to update locations if locations are invisible." msgstr "" "La mise à jour des emplacements n'est pas autorisée si les emplacements sont " "invisibles." msgid "Items have to be unique" msgstr "Les éléments doivent être uniques" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." msgstr "" "Le chemin Json doit commencer par '/' et ne pas se terminer par '/'. " "L'utilisation de deux '/' consécutifs n'est pas autorisée." 
msgid "Legacy image was not found" msgstr "L'image existante est introuvable" msgid "Length is greater than maximum" msgstr "La longueur est supérieure à la valeur maximale" msgid "Length is less than minimum" msgstr "La longueur est inférieure à la valeur minimale" msgid "Limit param must be an integer" msgstr "Le paramètre Limit doit être un entier" msgid "Limit param must be positive" msgstr "Le paramètre Limit doit être positif" #, python-format msgid "Limit param must not be higher than %d" msgstr "Le paramètre Limit ne doit pas être supérieur à %d" msgid "Limits request ID length." msgstr "Limite la longueur de l'ID de la demande." msgid "List definitions may hot have defaults" msgstr "Les définitions de listes peuvent ne pas avoir de valeurs par défaut" msgid "List of strings related to the image" msgstr "Liste des chaînes associées à l'image" msgid "List size is greater than maximum" msgstr "La taille de la liste a une valeur supérieure à la valeur maximale" msgid "List size is less than minimum" msgstr "La taille de la liste a une valeur inférieure à la valeur minimale" msgid "Loop time between checking for new items to schedule for delete." msgstr "" "Durée de la boucle entre les recherches de nouveaux éléments à planifier " "pour la suppression." #, python-format msgid "Malformed Content-Range header: %s" msgstr "En-tête Content-Range incorrect : %s" msgid "Malformed JSON in request body." msgstr "JSON incorrect dans le corps de demande." msgid "Max string length may not exceed 255 characters" msgstr "" "La longueur maximale de la chaîne ne peut pas être supérieure à 255 " "caractères" msgid "Maximal age is count of days since epoch." msgstr "L'ancienneté maximale est le nombre de jours depuis l'epoch." msgid "" "Maximum line size of message headers to be accepted. 
max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "Taille maximale de ligne des en-têtes de message à accepter. max_header_line " "peut avoir besoin d'être augmenté lors de l'utilisation de grands jetons " "(généralement ceux qui sont générés par l'API Keystone v3 avec des " "catalogues de service volumineux)" msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" "Nombre maximum de membres d'image par image. Une valeur négative signifie " "illimité." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Nombre maximum d'emplacements autorisés sur une image. Une valeur négative " "signifie illimité." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "" "Nombre maximum de propriétés autorisées sur une image. Une valeur négative " "signifie illimité." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "" "Nombre maximum de balises autorisées sur une image. Une valeur négative " "signifie illimité." msgid "Maximum permissible number of items that could be returned by a request" msgstr "" "Nombre d'éléments maximum autorisés qui peuvent être renvoyés par une demande" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "Le nombre maximum de redirections (%(redirects)s) a été dépassé." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "Taille maximum en octets d'une image qu'un utilisateur peut télécharger. " "Valeur 1099511627776 octets (1 To). 
AVERTISSEMENT : cette valeur ne doit " "être augmentée qu'après un examen approfondi et doit être définie à une " "valeur inférieure à 8 EB (9223372036854775808)." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "Le membre %(member_id)s est en double pour l'image %(image_id)s" msgid "Member can't be empty" msgstr "Membre ne peut pas être vide" msgid "Member to be added not specified" msgstr "Membre à ajouter non spécifié" msgid "Membership could not be found." msgstr "Appartenance non trouvée." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" "L'espace de nom %(namespace)s de la définition de métadonnées est protégé et " "ne peut pas être supprimé." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" "L'espace de nom de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" "L'espace de nom %(namespace_name)s de la définition de métadonnées est " "introuvable." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "L'objet %(object_name)s de la définition de métadonnées est protégé et ne " "peut pas être supprimé." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "L'objet de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "La propriété %(property_name)s de la définition de métadonnées est protégée " "et ne peut pas être supprimé." 
#, python-format msgid "Metadata definition property not found for id=%s" msgstr "La propriété de définition de métadonnées est introuvable pour l'ID %s" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Le type de ressource %(resource_type_name)s de la définition de métadonnées " "est un type prédéfiniet ne peut pas être supprimé." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "L'association de type de ressource %(resource_type)s de la définition de " "métadonnées est protégée et ne peut pas être supprimée." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" "La balise de définition de métadonnées %(tag_name)s est protégée et ne peut " "pas être supprimée." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "La balise de définition de métadonnées est introuvable pour l'ID %s" msgid "Min string length may not be negative" msgstr "La longueur minimale de la chaîne ne doit pas être négative" msgid "Minimal rows limit is 1." msgstr "Le nombre minimal de lignes est 1." #, python-format msgid "Missing required credential: %(required)s" msgstr "Données d'identification obligatoires manquantes : %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "Plusieurs correspondances de service 'image' pour la région %(region)s. En " "général, cela signifie qu'une région est requise et que vous n'en avez pas " "indiquée." msgid "Must supply a positive, non-zero value for age." msgstr "Veuillez fournir une valeur positive différente de zéro pour age." msgid "Name of the paste configuration file." msgstr "Nom du fichier de configuration de collage." 
#, python-format msgid "No artifact found with ID %s" msgstr "Aucun artefact avec l'ID %s n'a été trouvé. " msgid "No authenticated user" msgstr "Aucun utilisateur authentifié" #, python-format msgid "No image found with ID %s" msgstr "aucune image trouvée avec l'identifiant %s" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "Aucun emplacement trouvé avec l'ID %(loc)s dans l'image %(img)s" msgid "No permission to share that image" msgstr "Aucun droit de partage de cette image" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "Aucun plug-in n'a été chargé pour %(name)s" msgid "No property to access" msgstr "Aucune propriété à laquelle accéder" #, python-format msgid "No such key '%s' in a dict" msgstr "Aucune clé '%s' dans un dict" #, python-format msgid "Not a blob property '%s'" msgstr "N'est pas une propriété blob '%s'" msgid "Not a downloadable entity" msgstr "N'est pas une entité téléchargeable" msgid "Not a list property" msgstr "N'est pas une propriété list" #, python-format msgid "Not a list property '%s'" msgstr "N'est pas une propriété list '%s'" msgid "Not a valid value type" msgstr "Type de valeur non valide" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "Les dépendances n'ont pas toutes l'état '%s'" #, python-format msgid "Not allowed to create members for image %s." msgstr "Non autorisé à créer des membres pour l'image %s." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "Non autorisé à désactiver l'image dans l'état '%s'" #, python-format msgid "Not allowed to delete members for image %s." msgstr "Non autorisé à supprimer des membres de l'image %s." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "Non autorisé à supprimer des balises de l'image %s." #, python-format msgid "Not allowed to list members for image %s." msgstr "Non autorisé à répertorier les membres de l'image %s." 
#, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "Non autorisé à réactiver l'image dans l'état '%s'" #, python-format msgid "Not allowed to update members for image %s." msgstr "Non autorisé à mettre à jour les membres de l'image %s." #, python-format msgid "Not allowed to update tags for image %s." msgstr "Non autorisé à mettre à jour des balises de l'image %s." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "Non autorisé à télécharger des données image pour l'image %(image_id)s : " "%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "N'est pas un index de matrice '%s'" msgid "Number of sort dirs does not match the number of sort keys" msgstr "Le nombre de rép de tri ne correspond pas au nombre de clés de tri" msgid "OVA extract is limited to admin" msgstr "L'extraction de fichiers OVA est limitée à admin" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "Les métadonnées OVF désirées n'ont pas été spécifiées dans le fichier de " "configuration ovf-metadata.json. Définissez \"cim_pasd\" avec une liste de " "propriétés CIM_ProcessorAllocationSettingData intéressées." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "" "Le fichier de configuration OVF \"ovf-metadata.json\" n'a pas été trouvé." msgid "Old and new sorting syntax cannot be combined" msgstr "" "Les syntaxes de tri anciennes et nouvelles ne peuvent pas être combinées" msgid "Only list indexes are allowed for blob lists" msgstr "Seuls les index de listes sont autorisés pour les listes Blob" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "L'opération \"%s\" requiert un membre nommé \"value\"." 
msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "Les objets d'opération doivent contenir exactement un seul membre nommé \"add" "\", \"remove\" ou \"replace\"." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "Les objets d'opération doivent contenir un seul membre nommé \"add\", " "\"remove\" ou \"replace\"." msgid "Operations must be JSON objects." msgstr "Les opérations doivent être des objets JSON." #, python-format msgid "Operator %(op)s is not supported" msgstr "L'opérateur %(op)s n'est pas pris en charge" #, python-format msgid "Original locations is not empty: %s" msgstr "L'emplacement original %s n'est pas vide" msgid "Owner can't be updated by non admin." msgstr "Le propriétaire ne peut être mis à jour que par un administrateur." msgid "Owner must be specified to create a tag." msgstr "Le propriétaire doit être indiqué pour créer une balise." msgid "Owner of the image" msgstr "Propriétaire de l'image" msgid "Owner of the namespace." msgstr "Propriétaire de l'espace de nom." msgid "Param values can't contain 4 byte unicode." msgstr "" "Les valeurs de paramètre ne peuvent pas contenir de caractère Unicode de 4 " "octets." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "Nom partiel d'un pipeline dans le fichier de configuration de collage avec " "le nom de service supprimé. 
Par exemple, si votre section de collage est " "nommée [pipeline:glance-api-keystone], utilisez la valeur \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "" "Chemin d'accès au répertoire où les fichiers de métadonnées json sont stockés" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "Le nom de plug-in %(plugin)s doit correspondre au nom de type %(name)s de " "l'artefact" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "Le pointeur `%s` contient \"~\" qui ne fait pas partie d'une séquence " "d'échappement reconnue." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "Le pointeur `%s` contient des éléments \"/\" adjacent." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "le Pointeur '%s' ne contient pas de jeton valide." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "Le pointeur `%s` ne commence pas par \"/\"." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "le pointeur '%s' se termine avec un \"/\"." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "Le pointeur contient '~' qui ne fait pas partie d'une séquence d'échappement " "reconnue[~0, ~1]." #, python-format msgid "Port \"%s\" is not valid." msgstr "Port \"%s\" n'est pas valide." msgid "Port the registry server is listening on." msgstr "Port sur lequel le serveur de registre est en mode écoute." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "" "Taille du composant numérique Prerelease trop importante (%d caractères max)" msgid "Private key file to use when starting API server securely." msgstr "" "Fichier de clé privée à utiliser lors du démarrage du serveur d'API en toute " "sécurité." 
#, python-format msgid "Process %d not running" msgstr "Le processus %d n'est pas en fonctionnement" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "" "Les propriétés %s doivent être définies avant de sauvegarder les données." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "La propriété %(property_name)s ne commence pas par le préfixe d'association " "de type de ressource attendu : '%(prefix)s'." #, python-format msgid "Property %s already present." msgstr "Propriété %s déjà présente." #, python-format msgid "Property %s does not exist." msgstr "La propriété %s n'existe pas." #, python-format msgid "Property %s may not be removed." msgstr "La propriété %s n'est peut-être pas supprimée." #, python-format msgid "Property %s must be set prior to saving data." msgstr "La propriété %s doit être définie avant de sauvegarder les données." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "La propriété %(name)s n'a peut-être pas la valeur %(val)s : %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "La propriété '%s' est protégée" msgid "Property names can't contain 4 byte unicode." msgstr "" "Les noms de propriété ne peuvent pas contenir de caractère Unicode de 4 " "octets." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "Le verrouillage des propriétés sur l'opération %(operation)s pour la règle " "%(rule)s est introuvable. Aucun rôle ne sera autorisé à effectuer cette " "opération." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "La valeur %(prop)s de la propriété est introuvable" #, python-format msgid "" "Provided image size must match the stored image size. 
(provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "La taille de l'image fournie doit correspondre à la taille de l'image " "stockée. (taille fournie : %(ps)d, taille stockée : %(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "L'objet fourni ne correspond pas au schéma '%(schema)s' : %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "Le statut fourni de la tâche n'est pas pris en charge : %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "Le type de tâche fourni n'est pas pris en charge : %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "Fournit une description conviviale de l'espace de nom." msgid "Public images do not have members." msgstr "Les images publiques n'ont pas de membre." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "URL publique à utiliser pour le noeud final des versions. La valeur par " "défaut est None : l'attribut host_url de la demande est employé pour remplir " "la base de l'URL. Si Glance s'exécute derrière un proxy, vous devrez changer " "cette valeur pour représenter l'URL du proxy." msgid "Python module path of data access API" msgstr "API d'accès aux données du chemin du module Python" msgid "Received invalid HTTP redirect." msgstr "Redirection HTTP non valide reçue." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "Redirection vers %(uri)s pour autorisation." #, python-format msgid "Registry service can't use %s" msgstr "Le service de registre ne peut pas utiliser %s" #, python-format msgid "Registry was not configured correctly on API server. 
Reason: %(reason)s" msgstr "" "Le registre n'a pas été configuré correctement sur le serveur d'API. Cause : " "%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "La relation %(name)s peut n'avoir qu'une valeur" #, python-format msgid "Reload of %(serv)s not supported" msgstr "Rechargement de %(serv)s non pris en charge" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Rechargement de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" msgid "Request body must be a JSON array of operation objects." msgstr "Le corps de la demande doit être une matrice JSON d'objets Opération." msgid "Request must be a list of commands" msgstr "La demande doit être une liste de commandes" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "" "Les propriétés obligatoires de l'image pour la vérification de signature " "n'existent pas. Impossible de vérifier la signature." #, python-format msgid "Required store %s is invalid" msgstr "Le magasin requis %s n'est pas valide" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "Les noms de type de ressource doivent être alignés avec les types de " "ressource Heat dans la mesure du possible : http://docs.openstack.org/" "developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "La réponse de Keystone ne contient pas un noeud final Glance." msgid "Role used to identify an authenticated user as administrator." msgstr "" "Rôle utilisé pour identifier un utilisateur authentifié comme administrateur." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. 
When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "Exécuter en tant que processus de longue durée. Lorsqu'aucune valeur n'est " "indiquée (par défaut), exécute l'opération d'épuration une fois, puis quitte " "le programme. Lorsqu'une valeur est indiquée, ne quitte pas le programme et " "exécute l'épuration à l'intervalle wakeup_time indiqué dans la " "configuration." msgid "Scope of image accessibility" msgstr "Périmètre d'accessibilité de l'image" msgid "Scope of namespace accessibility." msgstr "Périmètre de l'accessibilité de l'espace de nom." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Clé secrète à utiliser pour signer les messages de suivi des services Glance " "API et Glance Registry." #, python-format msgid "Server %(serv)s is stopped" msgstr "Le serveur %(serv)s est arrêté" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "Echec de la création de travailleur de serveur : %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "Définissez un quota système global pour chaque utilisateur. Cette valeur " "est la capacité totale qu'un utilisateur peut utiliser dans tous les " "systèmes de stockage. Une valeur nulle signifie sans limite. Il est " "possible d'indiquer une unité optionnelle pour la valeur. Les unités " "acceptées sont B, KB, MB, GB et TB représentant respectivement des octets, " "des kilooctets, des mégaoctets, des gigaoctets et des téraoctets. 
Si aucune " "unité n'est indiquée, les octets sont utilisés par défaut. La valeur et " "l'unité doivent être accolées et les unités distinguent les majuscules et " "les minuscules." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "" "L'affichage du niveau %(shl)s n'est pas pris en charge dans cette opération" msgid "Signature verification failed" msgstr "La vérification de la signature a échoué" msgid "Signature verification failed." msgstr "La vérification de la signature a échoué." msgid "Size of image file in bytes" msgstr "Taille du fichier image en octets" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "Certains types de ressource autorisent plusieurs paires clé-valeur par " "instance. Par exemple, Cinder autorise les métadonnées d'utilisateur et " "d'image sur les volumes. Seules les métadonnées de propriétés d'image sont " "évaluées par Nova (planification ou pilotes). Cette propriété autorise une " "cible d'espace de nom pour lever l'ambiguïté." msgid "Sort direction supplied was not valid." msgstr "Le sens de tri fourni n'était pas valide." msgid "Sort key supplied was not valid." msgstr "La clé de tri fournie n'était pas valide." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "Spécifie le préfixe à utiliser pour le type de ressource donné. Toutes les " "propriétés de l'espace de nom doivent être précédées de ce préfixe " "lorsqu'elles s'appliquent au type de ressource spécifié. 
Vous devez inclure " "un séparateur de préfixe (par exemple, le signe deux-points :)." msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "Indique le programme d'exécution de tâche à utiliser pour exécuter les " "scripts de tâche." msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "L'état doit être \"en attente\", \"accepté\" ou \"rejeté\"." msgid "Status not specified" msgstr "Statut non spécifié" msgid "Status of the image" msgstr "Statut de l'image" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "La transition de statut de %(cur_status)s vers %(new_status)s n'est pas " "autorisée" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "Arrêt de %(serv)s (pid %(pid)s) avec le signal (%(sig)s)" #, python-format msgid "Store for image_id not found: %s" msgstr "Magasin de l'image_id non trouvé : %s" #, python-format msgid "Store for scheme %s not found" msgstr "Magasin du schéma %s non trouvé" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "%(attr)s (%(supplied)s) fournis et %(attr)s générés depuis l'image " "téléchargée (%(actual)s) ne correspondent pas. Définition du statut de " "l'image sur 'arrêté'." msgid "Supported values for the 'container_format' image attribute" msgstr "Valeurs prises en charge pour l'attribut d'image 'container_format'" msgid "Supported values for the 'disk_format' image attribute" msgstr "Valeurs prises en charge pour l'attribut d'image 'disk_format'" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "La relance supprimée en tant que %(serv)s était %(rsn)s." msgid "System SIGHUP signal received." msgstr "Signal SIGHUP du système reçu." 
#, python-format msgid "Task '%s' is required" msgstr "La tâche '%s' est obligatoire" msgid "Task does not exist" msgstr "La tâche n'existe pas" msgid "Task failed due to Internal Error" msgstr "Echec de la tâche en raison d'une erreur interne" msgid "Task was not configured properly" msgstr "La tâche n'a pas été configurée correctement" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "La tâche avec l'identificateur donné %(task_id)s est introuvable" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "Le filtre \"changes-since\" n'est plus disponible sur la version 2." #, python-format msgid "The CA file you specified %s does not exist" msgstr "" "Le fichier d'autorité de certification que vous avez spécifié %s n'existe pas" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "L'objet image %(image_id)s créé par la tâche %(task_id)s n'est plus dans un " "statut valide pour un traitement ultérieur." msgid "The Store URI was malformed." msgstr "L'URI de magasin était incorrect." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "URL du service keystone. Si \"use_user_token\" n'est pas en vigueur et si " "vous utilisez l'authentification keystone, l'URL de keystone peut être " "spécifiée." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "" "Adresse où le service d'authentification Swift est en mode écoute. (obsolète)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Mot de passe de l'administrateur. Si \"use_user_token\" n'est pas en " "vigueur, les données d'identification de l'administrateur peuvent être " "spécifiées." msgid "" "The administrators user name. 
If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "Nom d'utilisateur administrateur. Si \"use_user_token\" n'est pas en " "vigueur, les données d'identification de l'administrateur peuvent être " "spécifiées." msgid "The amount of time in seconds to delay before performing a delete." msgstr "Temps de retard en secondes avant toute suppression." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "Durée de conservation d'une image incomplète dans le cache avant que le " "programme de nettoyage du cache, s'il est exécuté, ne supprime cette image." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "" "Valeur de commandes en attente qui sera utilisée lors de la création du " "connecteur de programme d'écoute TCP." #, python-format msgid "The cert file you specified %s does not exist" msgstr "Le fichier de certificats que vous avez spécifié %s n'existe pas" msgid "The config file that has the swift account(s)configs." msgstr "Fichier de configuration possédant les configurations de compte SWIFT." msgid "The current status of this task" msgstr "Le statut actuel de cette tâche" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "L'unité hébergeant le répertoire de cache d'image %(image_cache_dir)s ne " "prend pas en charge xattr. Vous devez probablement éditer votre fstab et " "ajouter l'option user_xattr sur la ligne appropriée de l'unité hébergeant le " "répertoire de cache." msgid "The driver to use for image cache management." msgstr "Pilote à utiliser pour la gestion de cache d'image." #, python-format msgid "The format of the version %s is not valid. 
Use semver notation" msgstr "" "Le format de la version %s n'est pas valide. Utilisez la notation semver " msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "Format dans lequel les images seront automatiquement converties. Lorsque " "vous utilisez le back-end RBD, cette valeur doit être définie avec 'raw'" #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "L'identificateur URI fourni n'est pas valide. Indiquez un identificateur URI " "valide sélectionné dans la liste des identificateurs URI pris en charge : " "%(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" "Nom d'hôte/IP du processus pydev en mode écoute pour les connexions de " "débogage" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "L'image %s est déjà présente sur l'esclave mais notre recherche n'a pas " "permis de la trouver. Cela indique que nous sommes pas autorisés à afficher " "toutes les images sur le serveur esclave." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "L'objet BLOB d'artefact entrant est trop volumineux : %s" #, python-format msgid "The incoming image is too large: %s" msgstr "L'image entrante est trop grande : %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "Le fichier de clés que vous avez spécifié %s n'existe pas" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre d'emplacements d'image autorisés. 
" "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de membres d'image autorisés pour " "cette image. Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de propriétés d'image autorisées. " "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "La limite a été dépassée sur le nombre de propriétés d'image autorisées. " "Tentatives : %(num)s, Maximum : %(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "La limite a été dépassée sur le nombre de balises d'image autorisées. " "Tentatives : %(attempted)s, Maximum : %(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "L'emplacement %(location)s existe déjà" #, python-format msgid "The location data has an invalid ID: %d" msgstr "Les données d'emplacement possèdent un ID non valide : %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "Emplacement du fichier de protection des propriétés. Ce fichier contient les " "règles pour les protections de propriétés et les règles associées. 
Si cette " "valeur de configuration n'est pas spécifiée, les protections de propriétés " "ne seront pas imposées par défaut. Si une valeur est spécifiée et que le " "fichier est introuvable, le service glance-api ne démarrera pas." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "La définition de métadonnées %(record_type)s avec le nom %(record_name)s n'a " "pas été supprimée. Elle est encore associée à d'autres enregistrements." #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" "L'espace de nom %(namespace_name)s de la définition de métadonnées existe " "déjà." #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "L'objet %(object_name)s de la définition de métadonnées est introuvable dans " "l'espace de nom %(namespace_name)s." #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "La propriété %(property_name)s de la définition de métadonnées est " "introuvable dans l'espace de nom %(namespace_name)s." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "L'association de type de ressource de la définition de métadonnées entre " "letype de ressource %(resource_type_name)s et l'espace de nom " "%(namespace_name)s existe déjà." #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "L'association de type de ressource de la définition de métadonnées entre " "letype de ressource %(resource_type_name)s et l'espace de nom " "%(namespace_name)s est introuvable." 
#, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "" "Le type de ressource %(resource_type_name)s de la définition de métadonnées " "est introuvable." #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "La balise de définition de métadonnées nommée %(name)s est introuvable dans " "l'espace de nom %(namespace_name)s." msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" "Mode dans lequel le moteur sera exécuté. Peut être 'série' ou 'parallèle'." msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "Nombre d'agents de processus enfant qui seront créés pour répondre aux " "demandes de service. La valeur par défaut doit être égale au nombre d'UC " "disponibles." msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "Nombre d'activités parallèles exécutées au même moment par le moteur. La " "valeur peut être supérieure à un lorsque le mode de fonctionnement du moteur " "est 'parallèle'." msgid "The parameters required by task, JSON blob" msgstr "Les paramètres requis par la tâche, blob JSON" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "Chemin d'accès au fichier cert à utiliser dans les connexions SSL au " "serveur, le cas échéant. 
Sinon, vous pouvez définir la variable " "d'environnement GLANCE_CLIENT_CERT_FILE sur le chemin d'accès du fichier de " "certificat de l'autorité de certification" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "Chemin d'accès au fichier cert d'autorité de certification à utiliser dans " "les connexions SSL au serveur de registre, le cas échéant. Sinon, vous " "pouvez définir la variable d'environnement GLANCE_CLIENT_CA_FILE sur le " "chemin d'accès du fichier de certificat de l'autorité de certification ." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "Chemin d'accès au fichier de clés à utiliser dans les connexions SSL au " "serveur, le cas échéant. Sinon, vous pouvez définir la variable " "d'environnement GLANCE_CLIENT_KEY_FILE sur le chemin d'accès du fichier de " "clés" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" "Chemin vers la base de données de fichiers sqlite qui sera utilisée pour la " "gestion du cache d'image ." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "Période en secondes pendant laquelle le serveur d'API doit attendre qu'une " "demande de registre se termine. Une valeur de 0 signifie aucun délai " "d'attente." msgid "The port on which a pydev process is listening for connections." msgstr "Port sur lequel un processus pydev est en mode écoute des connexions." msgid "The port on which the server will listen." msgstr "Le port sur lequel le serveur écoutera." 
msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "" "Protocole à utiliser pour la communication avec le serveur de registre. http " "ou https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" "Le corps %(body)s fourni n'est pas valide dans le cadre du schéma indiqué : " "%(schema)s" msgid "The provided image is too large." msgstr "L'image fournie est trop volumineuse." #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "Le chemin %(path)s fourni n'est pas valide : %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "Référence aux paramètres du magasin/compte SWIFT par défaut à utiliser pour " "l'ajout de nouvelles images." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "Région du service d'authentification. Si \"use_user_token\" n'est pas en " "vigueur et si vous utilisez l'authentification keystone, le nom de région " "peut être spécifié." msgid "The request returned 500 Internal Server Error." msgstr "La demande a renvoyé le message 500 Internal Server Error." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "La demande a renvoyé le message 503 Service Unavailable. Cela se produit " "généralement lors d'une surcharge de service ou de tout autre coupure " "transitoire." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "La demande a renvoyé un message 302 Multiple Choices. 
Cela signifie " "généralement que vous n'avez pas inclus d'indicateur de version dans l'URI " "de demande.\n" "\n" "Le corps de la réponse a renvoyé :\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La demande a renvoyé un message 413 Request Entity Too Large. Cela signifie " "généralement que le taux limite ou le seuil de quota a été dépassé.\n" "\n" "Corps de la réponse :\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "La demande a renvoyé un statut inattendu : %(status)s.\n" "\n" "Corps de la réponse :\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "L'image demandée a été désactivée. Le téléchargement des données image est " "interdit." msgid "The result of current task, JSON blob" msgstr "Le résultat de la tâche en cours, blob JSON" msgid "The signature data was not properly encoded using base64" msgstr "Les données de signature n'ont pas été codées correctement en base64" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "La taille des données %(image_size)s dépassera la limite. %(remaining)s " "octets restants." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "Taille de pool d'unités d'exécution à utiliser pour l'épuration " "des images. Valeur par défaut : 1, ce qui correspond à une épuration série. " "Toute valeur supérieure à 1 indique le nombre maximal d'images pouvant être " "épurées en parallèle." 
#, python-format msgid "The specified member %s could not be found" msgstr "Le membre spécifié %s est introuvable" #, python-format msgid "The specified metadata object %s could not be found" msgstr "L'objet métadonnées spécifié %s est introuvable" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "La balise de métadonnées %s est introuvable" #, python-format msgid "The specified namespace %s could not be found" msgstr "L'espace de nom spécifié %s est introuvable" #, python-format msgid "The specified property %s could not be found" msgstr "La propriété spécifiée %s est introuvable" #, python-format msgid "The specified resource type %s could not be found " msgstr "Le type de ressource spécifié %s est introuvable " msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " "'pending_delete' ou 'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "L'état de l'emplacement de l'image supprimée ne peut être réglé que sur " "'pending_delete' ou 'deleted'." msgid "The status of this image member" msgstr "Statut de ce membre d'image" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "Les noms de magasin à utiliser pour obtenir l'ordre de préférence des " "magasins. Le nom doit être enregistré par l'un des magasins définis par " "l'option de configuration 'stores'. Cette option sera appliquée lors de " "l'utilisation de l'option 'store_type' comme stratégie d'emplacement d'image " "définie par l'option de configuration 'location_strategy'." msgid "" "The strategy to use for authentication. 
If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "Stratégie à utiliser pour l'authentification. Si \"use_user_token\" n'est " "pas en vigueur, la stratégie d'authentification peut être spécifiée." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "Le membre cible %(member_id)s est déjà associé à l'image %(image_id)s." msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "Nom de locataire de l'utilisateur administrateur. Si \"use_user_token\" " "n'est pas en vigueur, le nom de locataire de l'administrateur peut être " "spécifié." msgid "The type of task represented by this content" msgstr "Le type de tâche représenté par ce contenu" msgid "The unique namespace text." msgstr "Texte unique de l'espace de nom." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "Limite supérieure (taille maximale du cache cumulé en octets) au dessus de " "laquelle le programme de nettoyage du cache, s'il est exécuté, commence à " "nettoyer le cache d'image." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "Nom convivial de l'espace de nom. Utilisé par l'interface utilisateur si " "disponible." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" "Utilisateur à authentifier auprès du service d'authentification Swift " "(obsolète)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "Valeur de l'option de socket TCP_KEEPIDLE. 
Durée en secondes pendant " "laquelle la connexion doit être en veille avant que le protocole TCP " "commence à envoyer des sondes de signal de présence." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "Problème lié à votre %(error_key_name)s %(error_filename)s. Veuillez " "vérifier. Erreur : %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "Problème lié à votre %(error_key_name)s %(error_filename)s. Veuillez " "vérifier. Erreur OpenSSL : %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "Il y a un problème avec votre paire de clés. Vérifiez que le certificat " "%(cert_file)s et la clé %(key_file)s correspondent. Erreur OpenSSL %(ce)s" msgid "There was an error configuring the client." msgstr "Une erreur s'est produite lors de la configuration du client." msgid "There was an error connecting to a server" msgstr "Une erreur s'est produite lors de la connexion à un serveur." msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "Cette valeur de config indique si \"roles\" ou \"policies\" sont utilisés " "dans le fichier de verrouillage de propriétés." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "Cette opération n'est actuellement pas autorisée sur les tâches Glance. " "Elles sont supprimées automatiquement après avoir atteint l'heure définie " "par la propriété expires_at." msgid "This operation is currently not permitted on Glance images details." msgstr "" "Cette opération n'est pas actuellement autorisée sur des détails d'images " "Glance." 
msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "Cette valeur détermine quelle stratégie sera utilisée pour déterminer la " "stratégie location order de l'image. Actuellement, deux stratégies sont " "fournies avec Glance 'location_order' et 'store_type'." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "Durée de vie en heures d'une tâche suite à une réussite ou à un échec" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "Dépassement du délai d'attente pour les opérations socket des connexions " "client. Si une connexion entrante est inactive pendant ce nombre de " "secondes, elle sera fermée. La valeur '0' signifie une attente illimitée." msgid "Too few arguments." msgstr "Trop peu d'arguments." msgid "Too few locations" msgstr "Trop peu d'emplacements" msgid "Too many locations" msgstr "Trop d'emplacements" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "La taille totale est de %(size)d octets sur %(img_count)d images" msgid "Turn on/off delayed delete." msgstr "L'activation/la désactivation a retardé la suppression." msgid "Type version has to be a valid semver string" msgstr "La version du type doit être une chaîne semver valide" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "L'URI ne peut pas contenir plusieurs occurrences d'un schéma. 
Si vous avez " "spécifié un URI tel que swift://user:pass@http://authurl.com/v1/container/" "obj, vous devez le modifier pour utiliser le schéma swift+http://, par " "exemple : swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "" "URL permettant d'accéder au fichier image conservé dans le magasin externe" msgid "Unable to PUT to non-empty collection" msgstr "PUT impossible vers une collection qui n'est pas vide" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "Impossible de créer le fichier PID %(pid)s. Exécution en tant que non " "root ?\n" "Rétablissement vers un fichier temporaire. Vous pouvez arrêter le service " "%(service)s avec :\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "Filtrage impossible avec l'opérateur inconnu '%s'." msgid "Unable to filter on a range with a non-numeric value." msgstr "Impossible de filtrer sur une plage avec une valeur non numérique." msgid "Unable to filter on a unknown operator." msgstr "Filtrage impossible avec un opérateur inconnu." msgid "Unable to filter using the specified operator." msgstr "Filtrage impossible à l'aide de l'opérateur spécifié." msgid "Unable to filter using the specified range." msgstr "Impossible de filtrer à l'aide de la plage spécifiée." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "Impossible de trouver '%s' dans la modification du schéma JSON" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "Impossible de localiser `op` dans la modification du schéma JSON. Doit être " "l'une des valeurs suivantes : %(available)s." 
msgid "Unable to get legacy image" msgstr "Impossible d'obtenir une image existante" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "Impossible d'augmenter la limite de descripteur de fichier. Exécution en " "tant que non root ?" #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "Impossible de charger %(app_name)s depuis le fichier de configuration " "%(conf_file)s.\n" "Résultat : %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "Impossible de charger le schéma : %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "" "Impossible de localiser le fichier de configuration du collage pour %s." msgid "Unable to modify collection in immutable or readonly property" msgstr "" "Impossible de modifier la collecte dans une propriété non modifiable ou en " "lecture seule" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "Impossible de récupérer le certificat avec l'ID : %s" msgid "Unable to retrieve request id from context" msgstr "Impossible d'extraire l'ID de demande du contexte" msgid "Unable to specify artifact type explicitly" msgstr "Impossible d'indiquer le type d'artefact de façon explicite" msgid "Unable to specify artifact type version explicitly" msgstr "Impossible d'indiquer la version du type d'artefact de façon explicite" msgid "Unable to specify version if multiple types are possible" msgstr "Impossible d'indiquer la version si plusieurs types sont possibles" msgid "Unable to specify version if type is not specified" msgstr "Impossible d'indiquer la version si le type n'est pas spécifié" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "Impossible de télécharger des données image en double pour l'image " "%(image_id)s : %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" 
msgstr "" "Impossible de vérifier la signature car l'algorithme n'est pas pris en " "charge sur ce système" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "Impossible de vérifier la signature : %(reason)s" msgid "Unauthorized image access" msgstr "Accès à l'image non autorisé" msgid "Unexpected body type. Expected list/dict." msgstr "Type de corps inattendu. Type attendu : list/dict." #, python-format msgid "Unexpected response: %s" msgstr "Réponse inattendue : %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "Stratégie d'autorisation inconnue '%s'" #, python-format msgid "Unknown command: %s" msgstr "commande %s inconnue" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "Sens de tri inconnu, doit être 'desc' ou 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "Version brouillon du schéma JSON non reconnue" msgid "Unrecognized changes-since value" msgstr "Valeur changes-since non reconnue" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "sort_dir non pris en charge. Valeurs acceptables : %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "sort_key non pris en charge. 
Valeurs acceptables : %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "La valeur %(value)d est hors bornes, elle ne doit pas dépasser %(max)d" msgid "Value is greater than maximum" msgstr "La valeur est supérieure à la valeur maximale" msgid "Value is less than minimum" msgstr "La valeur est inférieure à la valeur minimale" msgid "Value is required" msgstr "La valeur est obligatoire" #, python-format msgid "Version component is too large (%d max)" msgstr "Taille du composant de version trop importante (%d max)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "La version n'est pas valide : %(reason)s" msgid "Virtual size of image in bytes" msgstr "Taille virtuelle de l'image en octets" msgid "Visibility must be either \"public\" or \"private\"" msgstr "La visibilité doit être \"public\" ou \"private\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" "Attente de la fin du pid %(pid)s (%(file)s) pendant 15 secondes ; abandon en " "cours" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "Lorsque la valeur est false, aucun artefact ne peut être chargé, quelle que " "soit la valeur de available_plugins. Lorsque la valeur est true, les " "artefacts peuvent être chargés." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "Lors de l'exécution du serveur en mode SSL, vous devez spécifier une valeur " "d'option cert_file et key_file dans votre fichier de configuration" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "Définie sur vrai, cette option définit le propriétaire d'une image comme " "locataire (tenant). 
Sinon, le propriétaire de l'image est l'utilisateur " "authentifié qui émet la demande." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "Lors de l'utilisation de la couche Secure Sockets Layer dans les connexions " "au serveur de registre, ne nécessite pas de validation via une autorité de " "certification. L'action équivalente du registre consiste à spécifier --" "insecure sur la ligne de commande en utilisant le client glance pour l'API ." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "Indique si les utilisateurs sont autorisés à spécifier des propriétés " "d'image en plus des propriétés fournies par le schéma image" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "Indique s'il faut inclure les emplacements d'image de backend dans les " "propriétés d'image. Par exemple, si vous utilisez le magasin de systèmes de " "fichiers, une URL de type \"file:///chemin/vers/image\" sera renvoyée à " "l'utilisateur dans le champ de métadonnées 'direct_url'. La révélation d'un " "emplacement de stockage peut présenter un risque de sécurité, donc utilisez " "ce paramètre avec précaution ! Définir ce paramètre sur true remplace " "l'option show_image_direct_url." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" 
msgstr "" "Indique si l'emplacement de stockage de l'image d'arrière-plan doit être " "inclus dans les propriétés de l'image. Ceci peut poser des problèmes de " "sécurité, donc soyez prudent dans l'utilisation de ce paramètre !" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "Indique si les en-têtes contenant les informations d'utilisateur et de " "locataire doivent être passées lors des demandes effectuées sur le registre. " "Cela permet au registre d'utiliser le middleware contextuel sans le " "middleware auth_token de keystonemiddleware, en supprimant les appels au " "service d'authentification keystone. Lors de l'utilisation de cette option, " "il est recommandé de sécuriser les communications entre l'API Glance et le " "registre Glance par des moyens autres que le middleware auth_token." msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "Transmettre le jeton utilisateur lors de demandes au registre. Pour éviter " "des échecs dus à l'expiration du jeton lors du téléchargement de fichiers " "volumineux, il est recommandé de définir de paramètre à 'False'.If Si " "\"use_user_token\" n'est pas activé, des données d'identification " "administrateur doivent être spécifiées." msgid "" "Work dir for asynchronous task operations. 
The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "Répertoire de travail pour les opérations de tâche asynchrone. Le répertoire " "défini ici sera utilisé pour exploiter les images, normalement avant de les " "importer dans le magasin de destination. Lorsque vous fournissez le " "répertoire de travail, assurez-vous d'avoir attribué suffisamment de place " "pour que les tâches simultanées s'exécutent de manière efficace sans manquer " "de place. Vous pouvez effectuer une estimation approximative en multipliant " "le nombre `max_workers` - ou le N d'agents en cours d'exécution - par une " "taille d'image moyenne (par exemple, 500 Mo). L'estimation de taille d'image " "doit être effectuée en fonction de la taille moyenne dans votre déploiement. " "Notez que selon les tâches en cours d'exécution, vous devrez peut-être " "multiplier ce nombre par un facteur qui dépendra de la complexité de la " "tâche. Par exemple, vous voudrez peut-être doubler la taille disponible si " "la conversion d'image est activée. 
Cela étant dit, rappelez-vous qu'il " "s'agit uniquement d'estimations et que vous devriez vous baser sur le " "scénario le moins favorable et être prêts à le modifier si jamais ces " "estimations s'avéraient fausses." #, python-format msgid "Wrong command structure: %s" msgstr "Structure de commande erronée : %s" msgid "You are not authenticated." msgstr "Vous n'êtes pas authentifié." msgid "You are not authorized to complete this action." msgstr "Vous n'êtes pas autorisé à effectuer cette action." #, python-format msgid "You are not authorized to lookup image %s." msgstr "Vous n'êtes pas autorisé à rechercher l'image %s." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "Vous n'êtes pas autorisé à rechercher les membres de l'image %s." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" "Vous n'êtes pas autorisé à créer une balise dans l'espace de nom détenu par " "'%s'" msgid "You are not permitted to create image members for the image." msgstr "Vous n'êtes pas autorisé à créer des membres image pour l'image." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "Vous n'êtes pas autorisé à créer des images détenues par '%s'." 
#, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "Vous n'êtes pas autorisé à créer un espace de nom détenu par '%s'" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "Vous n'êtes pas autorisé à créer un objet détenu par '%s'" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "Vous n'êtes pas autorisé à créer une propriété détenue par '%s'" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "" "Vous n'êtes pas autorisé à créer des types de ressource détenus par '%s'" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "" "Vous n'êtes pas autorisé à créer cette tâche avec comme propriétaire : %s" msgid "You are not permitted to deactivate this image." msgstr "Vous n'êtes pas autorisé à désactiver cette image." msgid "You are not permitted to delete this image." msgstr "Vous n'êtes pas autorisé à supprimer cette image." msgid "You are not permitted to delete this meta_resource_type." msgstr "Vous n'êtes pas autorisé à supprimer le paramètre meta_resource_type." msgid "You are not permitted to delete this namespace." msgstr "Vous n'êtes pas autorisé à supprimer cet espace de nom." msgid "You are not permitted to delete this object." msgstr "Vous n'êtes pas autorisé à supprimer cet objet." msgid "You are not permitted to delete this property." msgstr "Vous n'êtes pas autorisé à supprimer cette propriété." msgid "You are not permitted to delete this tag." msgstr "Vous n'êtes pas autorisé à supprimer cette balise." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "Vous n'êtes pas autorisé à modifier '%(attr)s' sur cette %(resource)s." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "Vous n'êtes pas autorisé à modifier '%s' sur cette image." 
msgid "You are not permitted to modify locations for this image." msgstr "Vous n'êtes pas autorisé à modifier les emplacements pour cette image." msgid "You are not permitted to modify tags on this image." msgstr "Vous n'êtes pas autorisé à modifier les balises pour cette image." msgid "You are not permitted to modify this image." msgstr "Vous n'êtes pas autorisé à modifier cette image." msgid "You are not permitted to reactivate this image." msgstr "Vous n'êtes pas autorisé à réactiver cette image." msgid "You are not permitted to set status on this task." msgstr "Vous n'êtes pas autorisé à définir le statut pour cette tâche." msgid "You are not permitted to update this namespace." msgstr "Vous n'êtes pas autorisé à mettre à jour cet espace de nom." msgid "You are not permitted to update this object." msgstr "Vous n'êtes pas autorisé à mettre à jour cet objet." msgid "You are not permitted to update this property." msgstr "Vous n'êtes pas autorisé à mettre à jour cette propriété." msgid "You are not permitted to update this tag." msgstr "Vous n'êtes pas autorisé à mettre à jour cette balise." msgid "You are not permitted to upload data for this image." msgstr "Vous n'êtes pas autorisé à télécharger des données pour cette image." 
#, python-format msgid "You cannot add image member for %s" msgstr "Vous ne pouvez pas ajouter le membre image pour %s" #, python-format msgid "You cannot delete image member for %s" msgstr "Vous ne pouvez pas supprimer le membre image pour %s" #, python-format msgid "You cannot get image member for %s" msgstr "Vous ne pouvez pas obtenir le membre image pour %s" #, python-format msgid "You cannot update image member %s" msgstr "Vous ne pouvez pas mettre à jour le membre image pour %s" msgid "You do not own this image" msgstr "Vous n'êtes pas propriétaire de cette image" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "Vous avez choisi d'utiliser SSL pour la connexion et avez fourni un " "certificat, cependant, vous n'avez pas fourni de paramètre key_file ou " "n'avez pas défini la variable d'environnement GLANCE_CLIENT_KEY_FILE" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "Vous avez choisi d'utiliser SSL pour la connexion et avez fourni une clé, " "cependant, vous n'avez pas fourni de paramètre cert_file ou n'avez pas " "défini la variable d'environnement GLANCE_CLIENT_CERT_FILE" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() a récupéré un argument de mot clé '%s' inattendu" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "impossible d'effectuer la transition depuis %(current)s vers %(next)s dans " "la mise à 
jour (voulu : from_state=%(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" "propriétés personnalisées (%(props)s) en conflit avec les propriétés de base" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" "Les concentrateurs Eventlet 'poll' et 'selects' sont indisponibles sur cette " "plateforme" msgid "is_public must be None, True, or False" msgstr "is_public doit être None, True ou False" msgid "limit param must be an integer" msgstr "le paramètre limit doit être un entier" msgid "limit param must be positive" msgstr "le paramètre limit doit être positif" #, python-format msgid "location: %s data lost" msgstr "emplacement : %s données perdues" msgid "md5 hash of image contents." msgstr "Hachage md5 du contenu d'image." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() a récupéré des mots-clés %s inattendus" msgid "protected must be True, or False" msgstr "protected doit être True ou False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "impossible de lancer %(serv)s. Erreur : %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id est trop long, sa taille maximale est de %s" glance-12.0.0/glance/locale/glance-log-error.pot0000664000567000056710000002425112701407047022545 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the glance project. # FIRST AUTHOR , 2016. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev23\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-09 06:18+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: glance/location.py:485 #, python-format msgid "" "Glance tried all active locations to get data for image %s but all have " "failed." msgstr "" #: glance/notifier.py:380 glance/api/common.py:91 #, python-format msgid "An error occurred during image.send notification: %(err)s" msgstr "" #: glance/scrubber.py:275 #, python-format msgid "Can not get scrub jobs from queue: %s" msgstr "" #: glance/scrubber.py:335 #, python-format msgid "Unable to scrub image %(id)s from a location. Reason: %(exc)s " msgstr "" #: glance/api/common.py:54 #, python-format msgid "" "An error occurred reading from backend storage for image %(image_id)s: " "%(err)s" msgstr "" #: glance/api/common.py:60 #, python-format msgid "" "Backend storage for image %(image_id)s disconnected after writing only " "%(bytes_written)d bytes" msgstr "" #: glance/api/common.py:119 #, python-format msgid "Invalid value for option user_storage_quota: %(users_quota)s" msgstr "" #: glance/api/glare/v0_1/glare.py:399 glance/api/glare/v0_1/glare.py:404 msgid "Failed to upload artifact blob data due to HTTP error" msgstr "" #: glance/api/glare/v0_1/glare.py:410 msgid "Failed to upload artifact blob data due to internal error" msgstr "" #: glance/api/glare/v0_1/glare.py:455 #, python-format msgid "Unable to restore artifact %(artifact_id)s: %(e)s" msgstr "" #: glance/api/middleware/cache.py:172 #, python-format msgid "" "Image cache contained image file for image '%s', however the registry did" " not contain metadata for that image!" 
msgstr "" #: glance/api/middleware/cache.py:272 #, python-format msgid "could not find %s" msgstr "" #: glance/api/middleware/cache.py:291 msgid "Checksum header is missing." msgstr "" #: glance/api/v1/images.py:653 #, python-format msgid "Copy from external source '%(scheme)s' failed for image: %(image)s" msgstr "" #: glance/api/v1/upload_utils.py:76 #, python-format msgid "Unable to kill image %(id)s: " msgstr "" #: glance/api/v1/upload_utils.py:270 #, python-format msgid "Received HTTP error while uploading image %s" msgstr "" #: glance/api/v2/image_data.py:63 #, python-format msgid "Unable to restore image %(image_id)s: %(e)s" msgstr "" #: glance/api/v2/image_data.py:79 #, python-format msgid "Unable to delete image %(image_id)s: %(e)s" msgstr "" #: glance/api/v2/image_data.py:229 #, python-format msgid "Signature verification failed for image %(id)s: %(e)s" msgstr "" #: glance/api/v2/image_data.py:238 glance/api/v2/image_data.py:242 msgid "Failed to upload image data due to HTTP error" msgstr "" #: glance/api/v2/image_data.py:247 msgid "Failed to upload image data due to internal error" msgstr "" #: glance/api/v2/metadef_namespaces.py:206 #, python-format msgid "Failed to delete namespace %(namespace)s " msgstr "" #: glance/async/__init__.py:67 msgid "" "This execution of Tasks is not setup. Please consult the project " "documentation for more information on the executors available." msgstr "" #: glance/async/__init__.py:71 msgid "Internal error occurred while trying to process task." msgstr "" #: glance/async/taskflow_executor.py:109 glance/async/taskflow_executor.py:139 #, python-format msgid "Failed to execute task %(task_id)s: %(exc)s" msgstr "" #: glance/async/flows/base_import.py:161 #, python-format msgid "Failed to execute security checks on the image %(task_id)s: %(exc)s" msgstr "" #: glance/async/flows/base_import.py:179 #, python-format msgid "Task: %(task_id)s failed to import image %(image_id)s to the filesystem." 
msgstr "" #: glance/async/flows/base_import.py:368 #, python-format msgid "Task ID %s" msgstr "" #: glance/async/flows/introspect.py:60 #, python-format msgid "Failed to execute introspection %(task_id)s: %(exc)s" msgstr "" #: glance/common/property_utils.py:76 #, python-format msgid "Couldn't find property protection file %(file)s: %(error)s." msgstr "" #: glance/common/property_utils.py:83 #, python-format msgid "" "Invalid value '%s' for 'property_protection_rule_format'. The permitted " "values are 'roles' and 'policies'" msgstr "" #: glance/common/property_utils.py:102 #, python-format msgid "" "Multiple policies '%s' not allowed for a given operation. Policies can be" " combined in the policy file" msgstr "" #: glance/common/property_utils.py:115 #, python-format msgid "" "Malformed property protection rule in [%(prop)s] %(op)s=%(perm)s: '@' and" " '!' are mutually exclusive" msgstr "" #: glance/common/property_utils.py:140 #, python-format msgid "Encountered a malformed property protection rule %(rule)s: %(error)s." msgstr "" #: glance/common/rpc.py:186 #, python-format msgid "" "RPC Call Error: %(val)s\n" "%(tb)s" msgstr "" #: glance/common/signature_utils.py:280 glance/common/signature_utils.py:356 #, python-format msgid "Unable to create verifier since algorithm is unsupported: %(e)s" msgstr "" #: glance/common/signature_utils.py:462 #, python-format msgid "Unable to retrieve certificate with ID %(id)s: %(e)s" msgstr "" #: glance/common/store_utils.py:61 #, python-format msgid "Failed to delete image %(image_id)s from store: %(exc)s" msgstr "" #: glance/common/swift_store_utils.py:77 #, python-format msgid "swift config file %(conf_file)s:%(exc)s not found" msgstr "" #: glance/common/swift_store_utils.py:91 msgid "Invalid format of swift store config cfg" msgstr "" #: glance/common/utils.py:108 #, python-format msgid "Error: cooperative_iter exception %s" msgstr "" #: glance/common/utils.py:367 msgid "" "Error setting up the debug environment. 
Verify that the option " "pydev_worker_debug_host is pointing to a valid hostname or IP on which a " "pydev server is listening on the port indicated by " "pydev_worker_debug_port." msgstr "" #: glance/common/wsgi.py:346 #, python-format msgid "Not respawning child %d, cannot recover from termination" msgstr "" #: glance/common/wsgi.py:913 #, python-format msgid "Caught error: %s" msgstr "" #: glance/common/glare/loader.py:91 #, python-format msgid "Unable to load artifacts: %s" msgstr "" #: glance/common/glare/loader.py:159 #, python-format msgid "Could not load plugin from %(module)s" msgstr "" #: glance/common/location_strategy/__init__.py:63 #, python-format msgid "Failed to load location strategy module %(module)s: %(e)s" msgstr "" #: glance/common/scripts/__init__.py:38 #, python-format msgid "" "This task type %(task_type)s is not supported by the current deployment " "of Glance. Please refer the documentation provided by OpenStack or your " "operator for more information." msgstr "" #: glance/common/scripts/__init__.py:48 #, python-format msgid "Failed to save task %(task_id)s in DB as task_repo is %(task_repo)s" msgstr "" #: glance/common/scripts/utils.py:44 #, python-format msgid "Task not found for task_id %s" msgstr "" #: glance/db/sqlalchemy/api.py:1259 #, python-format msgid "Invalid value for age, %(age)d" msgstr "" #: glance/db/sqlalchemy/api.py:1266 #, python-format msgid "Invalid value for max_rows, %(max_rows)d" msgstr "" #: glance/db/sqlalchemy/glare.py:348 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" #: glance/db/sqlalchemy/glare.py:561 msgid "Cannot use this parameter with the operator IN" msgstr "" #: glance/db/sqlalchemy/glare.py:569 #, python-format msgid "Operator %s is not supported" msgstr "" #: glance/db/sqlalchemy/metadata.py:194 #, python-format msgid "Json schema files not found in %s. Aborting." 
msgstr "" #: glance/db/sqlalchemy/metadata.py:211 #, python-format msgid "" "Failed to parse json file %(file_path)s while populating metadata due to:" " %(error_msg)s" msgstr "" #: glance/db/sqlalchemy/metadata.py:462 msgid "" "To use --prefer_new or --overwrite you need to combine of these options " "with --merge option." msgstr "" #: glance/db/sqlalchemy/metadata.py:467 msgid "" "Please provide no more than one option from this list: --prefer_new, " "--overwrite" msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:61 #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:91 #, python-format msgid "Invalid store uri for image: %(image_id)s. Details: %(reason)s" msgstr "" #: glance/domain/__init__.py:406 #, python-format msgid "" "Task [%(task_id)s] status failed to change from %(cur_status)s to " "%(new_status)s" msgstr "" #: glance/domain/__init__.py:512 #, python-format msgid "Failed to load the %s executor provided in the config." msgstr "" #: glance/glare/location.py:129 #, python-format msgid "Failed to delete blob %(blob_id)s from store: %(exc)s" msgstr "" #: glance/glare/location.py:166 #, python-format msgid "" "Glance tried all active locations to get data for blob %s but all have " "failed." msgstr "" #: glance/image_cache/__init__.py:268 #, python-format msgid "" "Exception encountered while tee'ing image '%(image_id)s' into cache: " "%(error)s. Continuing with response." msgstr "" #: glance/image_cache/drivers/sqlite.py:393 #, python-format msgid "Error executing SQLite call. 
Got error: %s" msgstr "" #: glance/registry/api/v1/images.py:134 msgid "Unable to get images" msgstr "" #: glance/registry/api/v1/images.py:351 #, python-format msgid "Unable to show image %s" msgstr "" #: glance/registry/api/v1/images.py:383 #, python-format msgid "Unable to delete image %s" msgstr "" #: glance/registry/api/v1/images.py:433 #, python-format msgid "Unable to create image %s" msgstr "" #: glance/registry/api/v1/images.py:505 #, python-format msgid "Unable to update image %s" msgstr "" #: glance/registry/client/v1/client.py:136 #, python-format msgid "Registry client request %(method)s %(action)s raised %(exc_name)s" msgstr "" #: glance/tests/functional/v2/test_metadef_resourcetypes.py:94 #, python-format msgid "Forbidden to create resource type. Reason: %(reason)s" msgstr "" glance-12.0.0/glance/locale/zh_TW/0000775000567000056710000000000012701407204017704 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000012701407204021471 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/zh_TW/LC_MESSAGES/glance.po0000664000567000056710000031414312701407047023275 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Jennifer , 2016. #zanata # Lucas Palm , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev41\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 00:22+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-10 03:06+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s 必須是字串" #, python-format msgid "%(attribute)s is required" msgstr "需要 %(attribute)s" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s 的長度不能超過 %(length)i" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s 的長度不能小於 %(length)i" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s 應符合型樣 %(pattern)s" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "前一個 RPC 呼叫已發出 %(cls)s 異常狀況:%(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "在映像檔 %(i_id)s 的成員清單中找不到 %(m_id)s。" #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s (pid %(pid)s) 正在執行中..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s 似乎已在執行中:%(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." msgstr "%(strategy)s 已登錄作為模組兩次。%(module)s 未使用。" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "未適當地配置 %(task_id)s(類型為 %(task_type)s)。無法載入檔案系統儲存庫" #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. 
Missing work dir: " "%(work_dir)s" msgstr "" "未適當地配置 %(task_id)s(類型為 %(task_type)s)。遺漏工作目錄:%(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "正在對 %(serv)s 執行 %(verb)s 作業" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "透過 %(conf)s,正在對 %(serv)s 執行 %(verb)s 作業" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 請指定 host:port 組,其中 host 是 IPv4 位址、IPv6 位址、主機名稱或 FQDN。" "如果使用 IPv6 位址,請將其單獨括在方括弧內,以與埠區別開(例如 \"[fe80::a:b:" "c]:9876\")。" #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s 不能包含 4 位元組 Unicode 字元。" #, python-format msgid "%s is already stopped" msgstr "已停止 %s" #, python-format msgid "%s is stopped" msgstr "%s 已停止" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "%(param)s 值已超出範圍,不得超過 %(max)d" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "--os_auth_url 選項或 OS_AUTH_URL 環境變數(啟用 Keystone 鑑別策略時需要)\n" msgid "A body is not expected with this request." msgstr "此要求預期不含內文。" msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "格式名稱或名稱-版本中容許的構件清單。空清單表示可以載入任何構件。" #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "名稱為 %(object_name)s 的 meta 資料定義物件已經存在於名稱空間 " "%(namespace_name)s 中。" #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." 
msgstr "" "名稱為 %(property_name)s 的 meta 資料定義內容已經存在於名稱空間 " "%(namespace_name)s 中。" #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "名稱為 %(resource_type_name)s 的 meta 資料定義資源類型已存在。" #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "名稱為 %(name)s 的 meta 資料標籤已存在於名稱空間 %(namespace_name)s 中。" msgid "A set of URLs to access the image file kept in external store" msgstr "用來存取外部儲存庫中所保留映像檔的 URL 集" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "用於加密儲存庫「位置」meta 資料的 AES 金鑰。它(如果使用的話)包含 Swift 或 " "S3 認證。應設定為長度為 16、24 或 32 個位元組的隨機字串" msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "用來連結伺服器的位址。在選取特定網路介面時很有用。" msgid "Address to find the registry server." msgstr "用來尋找登錄伺服器的位址。" msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "容許未經鑑別的使用者以唯讀專用權存取 API。這只有在使用 ContextMiddleware 時才" "適用。" #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "容許的值 %s 在給定的驗證器下無效" msgid "Amount of disk space (in GB) required to boot image." msgstr "啟動映像檔所需的磁碟空間數量(以 GB 為單位)。" msgid "Amount of ram (in MB) required to boot image." 
msgstr "啟動映像檔所需的 RAM 數量(以 MB 為單位)。" msgid "An identifier for the image" msgstr "映像檔的 ID" msgid "An identifier for the image member (tenantId)" msgstr "映像檔成員的 ID (tenantId)" msgid "An identifier for the owner of this task" msgstr "此作業的擁有者 ID" msgid "An identifier for the task" msgstr "作業的 ID" msgid "An image file url" msgstr "映像檔 URL" msgid "An image schema url" msgstr "映像檔綱目 URL" msgid "An image self url" msgstr "映像檔自身 URL" #, python-format msgid "An image with identifier %s already exists" msgstr "ID 為 %s 的映像檔已存在" msgid "An import task exception occurred" msgstr "發生匯入作業異常狀況" msgid "An object with the same identifier already exists." msgstr "已存在具有相同 ID 的物件。" msgid "An object with the same identifier is currently being operated on." msgstr "目前正在對具有相同 ID 的物件執行作業。" msgid "An object with the specified identifier was not found." msgstr "找不到具有所指定 ID 的物件。" msgid "An unknown exception occurred" msgstr "發生不明異常狀況" msgid "An unknown task exception occurred" msgstr "發生不明的作業異常狀況" #, python-format msgid "Array has no element at position %d" msgstr "陣列在位置 %d 處沒有元素" msgid "Array property can't have item_type=Array" msgstr "陣列內容的 item_type 不能為 Array" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "無法刪除構件 %s,因為它在使用中:%s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "構件無法從狀態 %(source)s 變更為狀態 %(target)s" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "構件超出儲存體配額:%s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "構件沒有內容 %(prop)s" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "構件狀態無法從 %(curr)s 變更為 %(to)s" #, python-format msgid "Artifact storage media is full: %s" msgstr "構件儲存媒體已滿:%s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "名稱為 '%(name)s' 且版本為 '%(version)s' 的構件類型不明" msgid "Artifact with a circular dependency can not be created" msgstr 
"無法建立具有循環相依關係的構件" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "無法存取 ID 為 %(id)s 的構件" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "找不到 ID 為 %(id)s 的構件" msgid "Artifact with the specified type, name and version already exists" msgstr "已存在具有所指定類型、名稱及版本的構件" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "具有所指定類型、名稱及版本的構件已具有直接相依關係 %(dep)s" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "具有所指定類型、名稱及版本的構件已具有可轉移的相依關係 %(dep)s" msgid "Attempt to set readonly property" msgstr "試圖設定唯讀內容" msgid "Attempt to set value of immutable property" msgstr "試圖設定不可變內容的值" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "嘗試上傳重複的映像檔:%s" msgid "Attempted to update Location field for an image not in queued status." msgstr "已嘗試更新處於未排入佇列狀態之映像檔的「位置」欄位。" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "屬性 '%(property)s' 是唯讀的。" #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "屬性 '%(property)s' 已保留。" #, python-format msgid "Attribute '%s' is read-only." msgstr "屬性 '%s' 是唯讀的。" #, python-format msgid "Attribute '%s' is reserved." msgstr "屬性 '%s' 已保留。" msgid "Attribute container_format can be only replaced for a queued image." msgstr "僅已排入佇列的映像檔可以取代屬性 container_format。" msgid "Attribute disk_format can be only replaced for a queued image." msgstr "僅已排入佇列的映像檔可以取代屬性 disk_format。" msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "針對 Swift 鑑別服務進行使用者鑑別的鑑別金鑰。(已淘汰)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "在 URL %(url)s 處找不到鑑別服務。" #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." 
msgstr "鑑別錯誤 - 在檔案上傳期間,記號可能已過期。正在刪除 %s 的映像檔資料。" msgid "Authorization failed." msgstr "授權失敗。" msgid "Available categories:" msgstr "可用的種類:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "\"%s\" 查詢過濾器格式錯誤。請使用 ISO 8601 日期時間表示法。" #, python-format msgid "Bad Command: %s" msgstr "錯誤的指令:%s" #, python-format msgid "Bad header: %(header_name)s" msgstr "錯誤的標頭:%(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "傳遞給過濾器 %(filter)s 的值不正確,取得 %(val)s" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "S3 URI 的格式不正確:%(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Swift URI 中認證 '%(creds)s' 的格式不正確" msgid "Badly formed credentials in Swift URI." msgstr "Swift URI 中認證的格式不正確。" msgid "Base directory that the image cache uses." msgstr "映像檔快取所使用的基本目錄。" msgid "BinaryObject property cannot be declared mutable" msgstr "不能反覆宣告 BinaryObject 內容" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "二進位大型物件 %(name)s 不能具有多個值" msgid "Blob size is not set" msgstr "未設定二進位大型物件大小" msgid "Body expected in request." msgstr "要求中需要內文。" msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "不能同時指定檔案和 legacy_image_id" msgid "CA certificate file to use to verify connecting clients." msgstr "要用來驗證連接用戶端的 CA 憑證檔。" msgid "Cannot be a negative value" msgstr "不能是負數值" msgid "Cannot be a negative value." msgstr "不能是負數值。" #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "無法將映像檔 %(key)s '%(value)s' 轉換為整數。" msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "無法宣告具有保留名稱 'metadata' 的構件內容" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "無法載入構件 '%(name)s'" msgid "Cannot remove last location in the image." 
msgstr "無法移除映像檔中的最後位置。" #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "無法儲存映像檔 %(image_id)s 的資料:%(error)s" msgid "Cannot set locations to empty list." msgstr "無法將位置設為空白清單。" msgid "Cannot specify 'max_size' explicitly" msgstr "無法明確指定 'max_size'" msgid "Cannot specify 'min_size' explicitly" msgstr "無法明確指定 'min_size'" msgid "Cannot upload to an unqueued image" msgstr "無法上傳至未排入佇列的映像檔" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "無法將此參數與運算子 %(op)s 搭配使用" msgid "Certificate file to use when starting API server securely." msgstr "安全啟動 API 伺服器時要使用的憑證檔。" #, python-format msgid "Certificate format not supported: %s" msgstr "憑證格式不受支援:%s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "在世界標準時間 %s 之後,憑證無效" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "在世界標準時間 %s 之前,憑證無效" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "總和檢查驗證失敗。已中止快取映像檔 '%s'。" msgid "Client disconnected before sending all data to backend" msgstr "用戶端已在將所有資料傳送至後端之前斷線" msgid "Command not found" msgstr "找不到指令" msgid "Configuration option was not valid" msgstr "配置選項無效" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "將錯誤/不當的要求連接至 URL %(url)s 處的鑑別服務。" #, python-format msgid "Constructed URL: %s" msgstr "已建構 URL:%s" msgid "Container format is not specified." msgstr "未指定儲存器格式。" msgid "Content-Type must be application/octet-stream" msgstr "內容類型必須是 application/octet-stream" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "映像檔 %(image_id)s 的映像檔下載已毀損" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "嘗試 30 秒鐘後仍無法連結至 %(host)s:%(port)s" msgid "Could not find OVF file in OVA archive file." 
msgstr "在 OVA 保存檔中找不到 OVF 檔。" #, python-format msgid "Could not find metadata object %s" msgstr "找不到 meta 資料物件 %s" #, python-format msgid "Could not find metadata tag %s" msgstr "找不到 meta 資料標籤 %s" #, python-format msgid "Could not find namespace %s" msgstr "找不到名稱空間 %s" #, python-format msgid "Could not find property %s" msgstr "找不到內容 %s" msgid "Could not find required configuration option" msgstr "找不到必要配置選項" #, python-format msgid "Could not find task %s" msgstr "找不到作業 %s" #, python-format msgid "Could not update image: %s" msgstr "無法更新映像檔:%s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "目前,不支援包含多個磁碟的 OVA 套件。" msgid "Custom validators list should contain tuples '(function, message)'" msgstr "自訂驗證器清單應包含值組 '(function, message)'" #, python-format msgid "Data for image_id not found: %s" msgstr "找不到 image_id 的資料:%s" msgid "Data supplied was not valid." msgstr "提供的資料無效。" msgid "Date and time of image member creation" msgstr "映像檔成員的建立日期和時間" msgid "Date and time of image registration" msgstr "映像檔登錄的日期和時間" msgid "Date and time of last modification of image member" msgstr "映像檔成員的前次修改日期和時間" msgid "Date and time of namespace creation" msgstr "名稱空間的建立日期和時間" msgid "Date and time of object creation" msgstr "物件的建立日期和時間" msgid "Date and time of resource type association" msgstr "資源類型關聯的日期和時間" msgid "Date and time of tag creation" msgstr "標記的建立日期和時間" msgid "Date and time of the last image modification" msgstr "映像檔的前次修改日期和時間" msgid "Date and time of the last namespace modification" msgstr "名稱空間的前次修改日期和時間" msgid "Date and time of the last object modification" msgstr "物件的前次修改日期和時間" msgid "Date and time of the last resource type association modification" msgstr "資源類型關聯的前次修改日期和時間" msgid "Date and time of the last tag modification" msgstr "標記的前次修改日期和時間" msgid "Datetime when this resource was created" msgstr "此資源的建立日期時間" msgid "Datetime when this resource was updated" msgstr "此資源的更新日期時間" msgid "Datetime when this resource would be subject to removal" 
msgstr "可能會移除此資源的日期時間" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "未在要求中明確指定項目數的情況下由要求所傳回的項目數預設值" msgid "Default value is invalid" msgstr "預設值無效" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "正在拒絕嘗試上傳構件,因為它已超出配額:%s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "正在拒絕嘗試上傳映像檔,因為它已超出配額:%s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "正在拒絕嘗試上傳大於 %d 個位元組的映像檔。" #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "必須先刪除相依關係內容 '%s'" msgid "Dependency relations cannot be mutable" msgstr "相依關係的關係必須是不可變的" msgid "Deploy the v1 OpenStack Images API." msgstr "部署第 1 版 OpenStack 映像檔 API。" msgid "Deploy the v1 OpenStack Registry API." msgstr "部署第 1 版 OpenStack 登錄 API。" msgid "Deploy the v2 OpenStack Images API." msgstr "部署第 2 版 OpenStack 映像檔 API。" msgid "Deploy the v2 OpenStack Registry API." msgstr "部署第 2 版 OpenStack 登錄 API。" msgid "Descriptive name for the image" msgstr "映像檔的敘述性名稱" msgid "Dictionary contains unexpected key(s)" msgstr "字典包含非預期的索引鍵" msgid "Dictionary size is greater than maximum" msgstr "字典大小大於上限" msgid "Dictionary size is less than minimum" msgstr "字典大小小於下限" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "將用於數位簽章的摘要演算法。請使用指令 \"openssl list-message-digest-" "algorithms\" 來取得平台上 OpenSSL 版本所支援的可用算法。例如:" "\"sha1\"、\"sha256\"、\"sha512\" 等。" msgid "Disk format is not specified." msgstr "未指定磁碟格式。" msgid "Does not match pattern" msgstr "不符合型樣" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. 
Reason: %(reason)s" msgstr "無法正確地配置驅動程式 %(driver_name)s。原因:%(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "必須指定檔案或 legacy_image_id" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "將您的要求進行解碼時發生錯誤。URL 或要求內文包含無法由 Glance 進行解碼的字元" #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "提取映像檔 %(image_id)s 的成員時發生錯誤:%(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "儲存庫配置發生錯誤。已停用新增構件至儲存庫。" msgid "Error in store configuration. Adding images to store is disabled." msgstr "儲存庫配置發生錯誤。已停用新增映像檔至儲存庫。" msgid "Error occurred while creating the verifier" msgstr "建立驗證器時發生錯誤" msgid "Error occurred while verifying the signature" msgstr "驗證簽章時發生錯誤" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "預期成員的格式為:{\"member\": \"image_id\"}" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "預期狀態的格式為:{\"status\": \"status\"}" msgid "External source should not be empty" msgstr "外部來源不應是空的" #, python-format msgid "External sources are not supported: '%s'" msgstr "不支援外部來源:'%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "無法啟動映像檔。發生錯誤:%s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "無法新增映像檔 meta 資料。發生錯誤:%s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "找不到要刪除的構件 %(artifact_id)s" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "找不到要刪除的映像檔 %(image_id)s" #, python-format msgid "Failed to find image to delete: %s" msgstr "找不到要刪除的映像檔:%s" #, python-format msgid "Failed to find image to update: %s" msgstr "找不到要更新的映像檔:%s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "找不到要刪除的資源類型 %(resourcetype)s" #, python-format msgid "Failed to initialize the image cache database. 
Got error: %s" msgstr "無法起始設定映像檔快取資料庫。發生錯誤:%s" #, python-format msgid "Failed to read %s from config" msgstr "無法從配置中讀取 %s" #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "無法保留映像檔。發生錯誤:%s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "無法更新映像檔 meta 資料。發生錯誤:%s" #, python-format msgid "Failed to upload image %s" msgstr "無法上傳映像檔 %s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "由於 HTTP 錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "由於內部錯誤而無法上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "檔案 %(path)s 具有無效的支援檔案 %(bfile)s,正在中斷。" msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "不容許檔案型匯入。請使用映像檔資料的非本端來源。" msgid "File too large" msgstr "檔案太大" msgid "File too small" msgstr "檔案太小" msgid "Forbidden image access" msgstr "已禁止映像檔存取" #, python-format msgid "Forbidden to delete a %s image." msgstr "已禁止刪除 %s 映像檔。" #, python-format msgid "Forbidden to delete image: %s" msgstr "已禁止刪除映像檔:%s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "已禁止修改 %(status)s 映像檔的 '%(key)s'。" #, python-format msgid "Forbidden to modify '%s' of image." msgstr "禁止修改映像檔的 '%s'。" msgid "Forbidden to reserve image." msgstr "已禁止保留映像檔。" msgid "Forbidden to update deleted image." msgstr "已禁止更新所刪除的映像檔。" #, python-format msgid "Forbidden to update image: %s" msgstr "已禁止更新映像檔:%s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "已禁止的上傳嘗試:%s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." 
msgstr "正在禁止要求,meta 資料定義名稱空間 %s 不可見。" #, python-format msgid "Forbidding request, task %s is not visible" msgstr "正在禁止要求,作業 %s 不可見" msgid "Format of the container" msgstr "儲存器的格式" msgid "Format of the disk" msgstr "磁碟的格式" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "取得二進位大型物件 %(name)s 資料失敗:%(err)s。" #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "取得映像檔 %(id)s 資料時失敗:%(err)s。" msgid "Glance client not installed" msgstr "未安裝 Glance 用戶端" #, python-format msgid "Host \"%s\" is not valid." msgstr "主機 \"%s\" 無效。" #, python-format msgid "Host and port \"%s\" is not valid." msgstr "主機和埠 \"%s\" 無效。" msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "適當的時候(通常是失敗時)僅併入人類可讀的參考訊息" msgid "If False doesn't trace SQL requests." msgstr "如果為 False,則將不追蹤 SQL 要求。" msgid "If False fully disable profiling feature." msgstr "如果為 False,則將完全停用側寫功能。" msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "如果為 False,則伺服器將在其回應中傳回標頭 \"Connection: close\",如果為 " "True,則伺服器將傳回 \"Connection: Keep-Alive\"。若要在傳送回應且用戶端已順利" "讀取回應之後明確關閉用戶端 Socket 連線,您只需在建立 wsgi 伺服器時,將此選項" "設定為 False。" msgid "If true, image will not be deletable." msgstr "如果為 true,則映像檔不可刪除。" msgid "If true, namespace will not be deletable." msgstr "如果為 True,則名稱空間將不可刪除。" #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "無法刪除映像檔 %(id)s,因為它在使用中:%(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "找不到映像檔 %(id)s" #, python-format msgid "" "Image %(image_id)s could not be found after upload. 
The image may have been " "deleted during the upload: %(error)s" msgstr "" "上傳之後找不到映像檔 %(image_id)s。可能已在上傳期間刪除該映像檔:%(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "映像檔 %(image_id)s 已受保護,無法刪除。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "上傳之後找不到映像檔 %s。可能已在上傳期間刪除該映像檔,正在清除已上傳的區塊。" #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "在上傳之後,找不到映像檔 %s。在上傳期間,可能已刪除該映像檔。" #, python-format msgid "Image %s is deactivated" msgstr "已取消啟動映像檔 %s" #, python-format msgid "Image %s is not active" msgstr "映像檔 %s 不在作用中" #, python-format msgid "Image %s not found." msgstr "找不到映像檔 %s。" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "映像檔超出儲存體配額:%s" msgid "Image id is required." msgstr "映像檔 ID 是必要的。" msgid "Image is protected" msgstr "映像檔是受保護的" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "已超出映像檔 %(id)s 的映像檔成員限制:%(e)s:" #, python-format msgid "Image name too long: %d" msgstr "映像檔名稱太長:%d" msgid "Image operation conflicts" msgstr "映像檔作業衝突" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "不容許映像檔狀態從 %(cur_status)s 轉移至 %(new_status)s" #, python-format msgid "Image storage media is full: %s" msgstr "映像檔儲存媒體已滿:%s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "已超出映像檔 %(id)s 的映像檔標籤限制:%(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "映像檔上傳問題:%s" #, python-format msgid "Image with identifier %s already exists!" msgstr "ID 為 %s 的映像檔已存在!" #, python-format msgid "Image with identifier %s has been deleted." 
msgstr "已刪除 ID 為 %s 的映像檔。" #, python-format msgid "Image with identifier %s not found" msgstr "找不到 ID 為 %s 的映像檔" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "找不到具有給定 ID %(image_id)s 的映像檔" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "不正確的鑑別策略,需要 \"%(expected)s\",但收到 \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "不正確的要求:%s" msgid "Index is out of range" msgstr "索引超出範圍" msgid "Index is required" msgstr "索引是必要的" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "輸入不包含 '%(key)s' 欄位" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "對構件儲存媒體的許可權不足:%s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "對映像檔儲存媒體的許可權不足:%s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "用來與 %s 搭配使用的 Content-Type 無效" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "此資源的 JSON 指標無效:'/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "無效的憑證格式:%s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "無效的總和檢查 '%s':不能超過 32 個字元" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 配置檔中的配置無效。" msgid "Invalid configuration in property protection file." msgstr "內容保護檔案中的配置無效。" #, python-format msgid "Invalid container format '%s' for image." msgstr "映像檔的儲存器格式 '%s' 無效。" #, python-format msgid "Invalid content type %(content_type)s" msgstr "無效的內容類型 %(content_type)s" msgid "Invalid dict property type" msgstr "無效的字典內容類型" msgid "Invalid dict property type specification" msgstr "無效的字典內容類型規格" #, python-format msgid "Invalid disk format '%s' for image." msgstr "映像檔的磁碟格式 '%s' 無效。" #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "無效的過濾器值 %s。遺漏右引號。" #, python-format msgid "" "Invalid filter value %s. 
There is no comma after closing quotation mark." msgstr "無效的過濾器值 %s。右引號後面沒有逗點。" #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "無效的過濾器值 %s。左引號前面沒有逗點。" #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "無效的標頭 \"Content-Type\":%s" msgid "Invalid image id format" msgstr "無效的映像檔 ID 格式" msgid "Invalid item type specification" msgstr "無效的項目類型規格" #, python-format msgid "Invalid json body: %s" msgstr "無效的 JSON 內文:%s" msgid "Invalid jsonpatch request" msgstr "無效的 jsonpatch 要求" msgid "Invalid location" msgstr "無效的位置" #, python-format msgid "Invalid location %s" msgstr "無效的位置 %s" #, python-format msgid "Invalid location: %s" msgstr "無效的位置:%s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "無效的 location_strategy 選項:%(name)s。有效的策略選項為:%(strategies)s" msgid "Invalid locations" msgstr "無效的位置" #, python-format msgid "Invalid locations: %s" msgstr "無效的位置:%s" msgid "Invalid marker format" msgstr "無效的標記格式" msgid "Invalid marker. Image could not be found." msgstr "無效的標記。找不到映像檔。" #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "無效的 mask_gen_algorithm:%s" #, python-format msgid "Invalid membership association: %s" msgstr "無效的成員資格關聯:%s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "磁碟格式及儲存器格式的混合無效。將磁碟格式或儲存器格式設為 'aki'、'ari' 或 " "'ami' 其中之一時,儲存器格式及磁碟格式必須相符。" #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "無效作業:`%(op)s`。它必須是下列其中一項:%(available)s。" msgid "Invalid position for adding a location." msgstr "用於新增位置的位置無效。" msgid "Invalid position for removing a location." 
msgstr "用於移除位置的位置無效。" msgid "Invalid property definition" msgstr "無效的內容定義" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "無效的 pss_salt_length:%s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "簽章金鑰類型的公開金鑰類型無效:%s" msgid "Invalid reference list specification" msgstr "無效的參照清單規格" msgid "Invalid referenced type" msgstr "無效的參照類型" msgid "Invalid request PATCH for work with blob" msgstr "用來與 Blob 搭配使用的要求 PATCH 無效" msgid "Invalid service catalog json." msgstr "無效的服務型錄 JSON。" #, python-format msgid "Invalid signature hash method: %s" msgstr "無效的簽章雜湊方法:%s" #, python-format msgid "Invalid signature key type: %s" msgstr "無效的簽章金鑰類型:%s" #, python-format msgid "Invalid sort direction: %s" msgstr "無效的排序方向:%s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "無效的排序鍵:%(sort_key)s。如果未設定類型版本,則它必須是下列其中一項:" "%(available)s。" #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "排序鍵 %(sort_key)s 無效。它必須為下列其中一項:%(available)s。" #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "無效的排序鍵:%(sort_key)s。您無法依此內容進行排序" #, python-format msgid "Invalid status value: %s" msgstr "無效的狀態值:%s" #, python-format msgid "Invalid status: %s" msgstr "無效的狀態:%s" #, python-format msgid "Invalid time format for %s." msgstr "%s 的時間格式無效。" msgid "Invalid type definition" msgstr "無效的類型定義" #, python-format msgid "Invalid type value: %s" msgstr "無效的類型值:%s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "更新無效。它會導致產生具有相同名稱 %s 的重複 meta 資料定義名稱空間。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." 
msgstr "" "無效的更新。此更新將導致下列名稱空間中存在具有相同名稱%(name)s 的重複 meta 資" "料定義物件:%(namespace_name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "無效的更新。此更新將導致下列名稱空間中存在具有相同名稱%(name)s 的重複 meta 資" "料定義物件:%(namespace_name)s。" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "更新無效。它會導致在下列名稱空間中產生具有相同名稱 %(name)s 的重複 meta 資料" "定義內容:%(namespace_name)s。" #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "參數 '%(param)s' 的值 '%(value)s' 無效:%(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "選項 %(option)s 的值 %(value)s 無效" #, python-format msgid "Invalid visibility value: %s" msgstr "無效的可見性值:%s" msgid "Is not allowed value" msgstr "不是容許的值" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "eventlet 模組似乎已在設定 %s='yes' 之前匯入。如果使用 ipv6,則目前需要停用 " "eventlet.greendns,因為 eventlet.greendns 目前以 ipv6 位址岔斷。請確保未在設" "定此項之前匯入 eventlet。" msgid "It's invalid to provide multiple image sources." msgstr "提供多個映像檔來源是無效的做法。" msgid "It's not allowed to add locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許新增位置。" msgid "It's not allowed to remove locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許移除位置。" msgid "It's not allowed to update locations if locations are invisible." msgstr "如果位置是隱藏的,則不容許更新位置。" msgid "Items have to be unique" msgstr "項目必須是唯一的" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." 
msgstr "JSON 路徑應以 '/' 開頭,不應以 '/' 結尾,且不容許兩個連續的 '/'。"
msgstr "映像檔上所容許的標籤數目上限。負數值求值為無限制。" msgid "Maximum permissible number of items that could be returned by a request" msgstr "要求可以傳回的所允許項目數上限" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "已超出重新導向數目上限(%(redirects)s 個)。" msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "使用者可以上傳的映像檔大小上限(以位元組為單位)。預設為1099511627776 個位元" "組 (1 TB)。警告:應僅在經過慎重考慮之後才增大此值,且必須設定為小於 8 EB " "(9223372036854775808) 的值。" #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "針對映像檔 %(image_id)s,成員 %(member_id)s 重複" msgid "Member can't be empty" msgstr "成員不能是空的" msgid "Member to be added not specified" msgstr "未指定要新增的成員" msgid "Membership could not be found." msgstr "找不到成員資格。" #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "Meta 資料定義名稱空間 %(namespace)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義名稱空間" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "找不到 meta 資料定義名稱空間 %(namespace_name)s。" #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "Meta 資料定義物件 %(object_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition object not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義物件" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." 
msgstr "Meta 資料定義內容 %(property_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition property not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義內容" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "Meta 資料定義資源類型 %(resource_type_name)s 是種子系統類型,無法將其刪除。" #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "Meta 資料定義資源類型關聯 %(resource_type)s 已受保護,無法將其刪除。" #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "meta 資料定義標籤 %(tag_name)s 受保護,無法將其刪除。" #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "找不到 ID 為 %s 的 meta 資料定義標籤" msgid "Min string length may not be negative" msgstr "字串長度下限不能為負數" msgid "Minimal rows limit is 1." msgstr "列數下限限制為 1。" #, python-format msgid "Missing required credential: %(required)s" msgstr "遺漏了必要認證:%(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally means " "that a region is required and you have not supplied one." msgstr "" "區域 %(region)s 有多個「映像檔」服務相符項。這通常表示需要一個區域,但您尚未" "提供。" msgid "Must supply a positive, non-zero value for age." msgstr "必須為經歷時間提供非零正數值。" msgid "Name of the paste configuration file." 
msgstr "paste 配置檔的名稱。" #, python-format msgid "No artifact found with ID %s" msgstr "找不到 ID 為 %s 的構件" msgid "No authenticated user" msgstr "沒有已鑑別使用者" #, python-format msgid "No image found with ID %s" msgstr "找不到 ID 為 %s 的映像檔" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "從映像檔 %(img)s 中找不到 ID 為 %(loc)s 的位置" msgid "No permission to share that image" msgstr "沒有共用該映像檔的許可權" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "未載入 '%(name)s' 的外掛程式" msgid "No property to access" msgstr "沒有要存取的內容" #, python-format msgid "No such key '%s' in a dict" msgstr "字典中沒有此類鍵 '%s'" #, python-format msgid "Not a blob property '%s'" msgstr "不是二進位大型物件內容 '%s'" msgid "Not a downloadable entity" msgstr "不是可下載的實體" msgid "Not a list property" msgstr "不是清單內容" #, python-format msgid "Not a list property '%s'" msgstr "不是清單內容 '%s'" msgid "Not a valid value type" msgstr "不是有效的值類型" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "並非所有相依關係都處於 '%s' 狀態" #, python-format msgid "Not allowed to create members for image %s." msgstr "不容許建立映像檔 %s 的成員。" #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "不容許取消啟動處於狀態 '%s' 的映像檔" #, python-format msgid "Not allowed to delete members for image %s." msgstr "不容許刪除映像檔 %s 的成員。" #, python-format msgid "Not allowed to delete tags for image %s." msgstr "不容許刪除映像檔 %s 的標籤。" #, python-format msgid "Not allowed to list members for image %s." msgstr "不容許列出映像檔 %s 的成員。" #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "不容許重新啟動處於狀態 '%s' 的映像檔" #, python-format msgid "Not allowed to update members for image %s." msgstr "不容許更新映像檔 %s 的成員。" #, python-format msgid "Not allowed to update tags for image %s." 
msgstr "不容許更新映像檔 %s 的標籤。" #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "不容許上傳映像檔 %(image_id)s 的映像檔資料:%(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "不是陣列 idx '%s'" msgid "Number of sort dirs does not match the number of sort keys" msgstr "排序方向數目與排序鍵數目不符" msgid "OVA extract is limited to admin" msgstr "OVA 擷取已限制為管理者" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "在 ovf-metadata.json 配置檔中,未指定相關 OVF meta 資料。請將 \"cim_pasd\" 設" "為相關 CIM_ProcessorAllocationSettingData 內容的清單。" msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "找不到 OVF 內容配置檔 \"ovf-metadata.json\"。" msgid "Old and new sorting syntax cannot be combined" msgstr "無法結合新舊排序語法" msgid "Only list indexes are allowed for blob lists" msgstr "二進位大型物件清單只接受清單索引" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "作業 \"%s\" 需要名稱為 \"value\" 的成員。" msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "作業物件必須正好包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "作業物件只能包含一個名稱為 \"add\"、\"remove\" 或 \"replace\" 的成員。" msgid "Operations must be JSON objects." msgstr "作業必須是 JSON 物件。" #, python-format msgid "Operator %(op)s is not supported" msgstr "運算子 %(op)s 不受支援" #, python-format msgid "Original locations is not empty: %s" msgstr "原始位置不是空的:%s" msgid "Owner can't be updated by non admin." msgstr "擁有者無法由非管理者進行更新。" msgid "Owner must be specified to create a tag." msgstr "必須指定擁有者才能建立標籤。" msgid "Owner of the image" msgstr "映像檔的擁有者" msgid "Owner of the namespace." msgstr "名稱空間的擁有者。" msgid "Param values can't contain 4 byte unicode." 
msgstr "參數值不能包含 4 位元組 Unicode。" msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "paste 配置檔中 pipeline 的部分名稱(移除了服務名稱)。比方說,如果 paste 區段" "名稱為[pipeline:glance-api-keystone],請使用值 \"keystone\"" msgid "Path to the directory where json metadata files are stored" msgstr "在其中儲存 JSON meta 資料檔的目錄路徑" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "外掛程式名稱 '%(plugin)s' 應該與構件類型名稱 '%(name)s' 相符" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "指標 `%s` 包含不屬於可辨識 ESC 序列的 \"~\"。" #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "指標 `%s` 包含相鄰的 \"/\"。" #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "指標 `%s` 不包含有效的記號。" #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "指標 `%s` 的開頭不是 \"/\"。" #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "指標 `%s` 的結尾是 \"/\"。" msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "指標所包含的 '~' 不是可辨識 ESC 序列 [~0, ~1] 的一部分。" #, python-format msgid "Port \"%s\" is not valid." msgstr "埠 \"%s\" 無效。" msgid "Port the registry server is listening on." msgstr "登錄伺服器正在其上接聽的埠。" #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "Prerelease 數值元件太大(上限為 %d 個字元)" msgid "Private key file to use when starting API server securely." msgstr "安全啟動 API 伺服器時要使用的私密金鑰檔。" #, python-format msgid "Process %d not running" msgstr "程序 %d 不在執行中" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "儲存資料之前必須設定內容 %s。" #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." 
msgstr "內容 %(property_name)s 的開頭不是預期的資源類型關聯字首 '%(prefix)s'。" #, python-format msgid "Property %s already present." msgstr "內容 %s 已存在。" #, python-format msgid "Property %s does not exist." msgstr "內容 %s 不存在。" #, python-format msgid "Property %s may not be removed." msgstr "可能無法移除內容 %s。" #, python-format msgid "Property %s must be set prior to saving data." msgstr "儲存資料之前必須設定內容 %s。" #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "內容 '%(name)s' 可能沒有值 '%(val)s':%(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "內容 '%s' 受保護" msgid "Property names can't contain 4 byte unicode." msgstr "內容名稱不能包含 4 位元組 Unicode。" #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "找不到規則 %(rule)s 的作業 %(operation)s 上的內容保護。將不容許任何角色執行此" "作業。" #, python-format msgid "Property's %(prop)s value has not been found" msgstr "找不到內容 %(prop)s 的值" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "提供的映像檔大小必須符合儲存的映像檔大小。(提供的大小:%(ps)d,儲存的大小:" "%(ss)d)" #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "所提供的物件與綱目 '%(schema)s' 不符:%(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "提供的作業狀態 %(status)s 不受支援" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "提供的作業類型 %(type)s 不受支援" msgid "Provides a user friendly description of the namespace." msgstr "提供對使用者更為友善的名稱空間說明。" msgid "Public images do not have members." msgstr "公用映像檔沒有成員。" msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." 
msgstr "" "要用於版本端點的公用 URL。預設值為 None,這將會使用要求的 host_url 屬性來移入" "基本 URL。如果 Glance 在 Proxy 後方作業,您將會想要變更此項以代表Proxy 的 " "URL。" msgid "Python module path of data access API" msgstr "資料存取 API 的 Python 模組路徑" msgid "Received invalid HTTP redirect." msgstr "收到無效的 HTTP 重新導向。" #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "正在重新導向至 %(uri)s 以進行授權。" #, python-format msgid "Registry service can't use %s" msgstr "登錄服務無法使用 %s" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "API 伺服器上未正確地配置登錄。原因:%(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "關係 %(name)s 不能具有多個值" #, python-format msgid "Reload of %(serv)s not supported" msgstr "不支援重新載入 %(serv)s" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "正在使用信號 (%(sig)s) 來重新載入 %(serv)s (pid %(pid)s)" #, python-format msgid "Removing stale pid file %s" msgstr "正在移除過時 PID 檔案 %s" msgid "Request body must be a JSON array of operation objects." msgstr "要求內文必須是作業物件的 JSON 陣列。" msgid "Request must be a list of commands" msgstr "要求必須是指令清單" msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "簽章驗證所需的映像檔內容不存在。無法驗證簽章。" #, python-format msgid "Required store %s is invalid" msgstr "需要的儲存庫 %s 無效" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "資源類型名稱應該儘可能與 Heat 資源類型一致:http://docs.openstack.org/" "developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone 的回應不包含 Glance 端點。" msgid "Role used to identify an authenticated user as administrator." msgstr "用來將已鑑別使用者識別為管理者的角色。" msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. 
When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "作為長時間執行程序來執行。當未指定時(預設值),執行清除作業一次,然後結束。" "當指定時,不結束,並依據配置中指定的 wakeup_time 間隔來執行清除。" msgid "Scope of image accessibility" msgstr "映像檔的可存取性範圍" msgid "Scope of namespace accessibility." msgstr "名稱空間的可存取性範圍。" msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "用來簽署 Glance API 及「Glance 登錄」追蹤訊息的秘密金鑰。" #, python-format msgid "Server %(serv)s is stopped" msgstr "伺服器 %(serv)s 已停止" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "建立伺服器工作程式失敗:%(reason)s。" msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "給每個使用者設定系統通用的配額。此值是使用者可以在所有儲存體系統上使用的總容" "量。值 0 表示無限制。可以為該值指定選用單位。接受的單位有B、KB、MB、GB 和 " "TB,分別代表位元組、千位元組、百萬位元組、十億位元組和兆位元組。如果未指定單" "位,則將採用位元組。請注意,在值與單位之間不應有任何空格,並且單位區分大小" "寫。" #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "顯示層次 %(shl)s 在此作業中不受支援" msgid "Signature verification failed" msgstr "簽章驗證失敗" msgid "Signature verification failed." msgstr "簽章驗證失敗。" msgid "Size of image file in bytes" msgstr "映像檔的大小(以位元組為單位)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." 
msgstr ""
"部分資源類型容許每個實例具有多個鍵值組。例如,Cinder 容許磁區上存在使用者及映"
"像檔 meta 資料。Nova 只評估映像檔內容 meta 資料(排程或驅動程式)。"
"此內容容許名稱空間目標消除此語義不明確情況。"
msgstr "接收到系統 SIGHUP 信號。" #, python-format msgid "Task '%s' is required" msgstr "需要作業 '%s'" msgid "Task does not exist" msgstr "作業不存在" msgid "Task failed due to Internal Error" msgstr "由於內部錯誤,作業失敗" msgid "Task was not configured properly" msgstr "作業未適當地配置" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "找不到具有給定 ID %(task_id)s 的作業" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "在第 2 版上,已無法再使用 \"changes-since\" 過濾器。" #, python-format msgid "The CA file you specified %s does not exist" msgstr "指定的 CA 檔 %s 不存在" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "此作業 %(task_id)s 所建立的映像檔 %(image_id)s 物件不再處於有效狀態,無法進一" "步處理。" msgid "The Store URI was malformed." msgstr "儲存庫 URI 的格式不正確。" msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "Keystone 服務的 URL。如果 \"use_user_token\" 未生效並且使用了 Keystone 鑑別," "則可以指定 Keystone 的 URL。" msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Swift 鑑別服務正在接聽的位址。(已淘汰)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "管理者密碼。如果 \"use_user_token\" 未生效,則可以指定管理認證。" msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "管理者使用者名稱。如果 \"use_user_token\" 未生效,則可以指定管理認證。" msgid "The amount of time in seconds to delay before performing a delete." msgstr "執行刪除之前要延遲的時間量(以秒為單位)。" msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "在快取中保留不完整映像檔的時間量,超過該時間量後,快取刪改程式(如果在執行" "中)將移除不完整的映像檔。" msgid "" "The backlog value that will be used when creating the TCP listener socket." 
msgstr "建立 TCP 接聽器 Socket 時將使用的待處理連線佇列 (backlog) 值。"
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔位置數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "此映像檔容許的映像檔成員數目已超出此限制。已嘗試:%(attempted)s,上限:" "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔內容數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "容許的映像檔內容數目已超出此限制。已嘗試:%(num)s,上限:%(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "容許的映像檔標籤數目已超出此限制。已嘗試:%(attempted)s,上限:%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "位置 %(location)s 已存在" #, python-format msgid "The location data has an invalid ID: %d" msgstr "位置資料的 ID 無效:%d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "內容保護檔案的位置。此檔案包含用於內容保護的規則及與之相關聯的角色/原則。如果" "未指定此配置值,則依預設,不會強制施行內容保護。如果指定了值但找不到該檔案," "則 glance-api 服務將不會啟動。" #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "未刪除名稱為 %(record_name)s 的 meta 資料定義 %(record_type)s。其他記錄仍參照" "此 meta 資料定義。" #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." 
msgstr "Meta 資料定義名稱空間 %(namespace_name)s 已經存在。" #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in " "namespace=%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(object_name)s 的 meta 資料定義物件:" "%(namespace_name)s。" #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not found " "in namespace=%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(property_name)s 的 meta 資料定義內容:" "%(namespace_name)s。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s already exists." msgstr "" "資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資料定義" "資源類型關聯已存在。" #, python-format msgid "" "The metadata definition resource-type association of resource-type=" "%(resource_type_name)s to namespace=%(namespace_name)s, was not found." msgstr "" "找不到資源類型 %(resource_type_name)s 與名稱空間 %(namespace_name)s 的meta 資" "料定義資源類型關聯。" #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, was " "not found." msgstr "找不到名稱為 %(resource_type_name)s 的 meta 資料定義資源類型。" #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in namespace=" "%(namespace_name)s." msgstr "" "在下列名稱空間中,找不到名稱為 %(name)s 的 meta 資料定義標籤:" "%(namespace_name)s。" msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "將用於執行引擎的模式。可以為「串行」或「平行」。" msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" "將建立用來處理服務要求的子程序工作程式數目。預設值將等於可用的 CPU 數目。" msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." 
msgstr "引擎同時執行的平行活動數目。當引擎模式為「平行」時,該值可以大於 1。" msgid "The parameters required by task, JSON blob" msgstr "作業所需的參數:JSON 二進位大型物件" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "要在登錄伺服器的 SSL 連線(如果有的話)中使用的憑證檔案路徑。或者,您可以將 " "GLANCE_CLIENT_CERT_FILE 環境變數設定為 CA 憑證檔案的檔案路徑" msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "要在登錄伺服器的 SSL 連線(如果有的話)中使用的憑證管理中心憑證檔案的路徑。或" "者,您可以將 GLANCE_CLIENT_CA_FILE 環境變數設定為 CA 憑證檔案的檔案路徑。" msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "要在登錄伺服器的 SSL 連線(如果有的話)中使用的金鑰檔路徑。或者,您可以將 " "GLANCE_CLIENT_KEY_FILE 環境變數設定為金鑰檔的檔案路徑" msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "將用於映像檔快取管理的 SQLite 檔案資料庫路徑。" msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "API 伺服器將等待登錄要求完成的時段(以秒為單位)。值 0 表示無逾時。" msgid "The port on which a pydev process is listening for connections." msgstr "pydev 處理程序正在用來接聽連線的埠。" msgid "The port on which the server will listen." msgstr "伺服器將在其上進行接聽的埠。" msgid "" "The protocol to use for communication with the registry server. Either http " "or https." msgstr "用於與登錄伺服器進行通訊的通訊協定。HTTP 或 HTTPS。" #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "在給定綱目 %(schema)s 下提供的主體 %(body)s 無效" msgid "The provided image is too large." 
msgstr "所提供的映像檔太大。" #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "提供的路徑 '%(path)s' 無效:%(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "對預設 Swift 帳戶/支援儲存庫參數的參照,以用於新增映像檔。" msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "鑑別服務的區域。如果 \"use_user_token\" 未生效並且使用了 Keystone 鑑別,則可" "以指定區域名稱。" msgid "The request returned 500 Internal Server Error." msgstr "要求傳回了「500 內部伺服器錯誤」。" msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "要求傳回了「503 無法使用服務」。通常,在服務超載或其他暫時性服務中斷時發生。" #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "要求傳回了「302 多重選擇」。這通常表示要求 URI 中尚不包含版本指示符。\n" "\n" "傳回了回應內文:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求傳回了「413 要求實體太大」。這通常表示已違反評比限制或配額臨界值。\n" "\n" "回應內文:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "要求傳回了非預期的狀態:%(status)s。\n" "\n" "回應內文:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "已取消啟動所要求的映像檔。已禁止下載映像檔資料。" msgid "The result of current task, JSON blob" msgstr "現行作業的結果:JSON 二進位大型物件" msgid "The signature data was not properly encoded using base64" msgstr "簽章資料未使用 base64 進行正確編碼" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." 
msgstr "資料的大小 %(image_size)s 將超出該限制。剩餘 %(remaining)s 個位元組。" msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "要用於清除映像檔的執行緒儲存區大小。預設值為 1,這表示序列清除。任何大於 1 的" "值都指示可平行清除的映像檔數目上限。" #, python-format msgid "The specified member %s could not be found" msgstr "找不到指定的成員 %s" #, python-format msgid "The specified metadata object %s could not be found" msgstr "找不到指定的 meta 資料物件 %s" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "找不到指定的 meta 資料標籤 %s" #, python-format msgid "The specified namespace %s could not be found" msgstr "找不到指定的名稱空間 %s" #, python-format msgid "The specified property %s could not be found" msgstr "找不到指定的內容 %s" #, python-format msgid "The specified resource type %s could not be found " msgstr "找不到指定的資源類型 %s" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "只能將已刪除映像檔位置的狀態設為 'pending_delete' 或'deleted'。" msgid "The status of this image member" msgstr "此映像檔成員的狀態" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "用來取得儲存庫喜好設定順序的儲存庫名稱。名稱必須由'stores' 配置選項所定義的其" "中一個儲存庫進行登錄。當您使用 'store_type' 選項來作為 'location_strategy' 配" "置選項所定義的映像檔位置策略時,將套用此選項。" msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." 
msgstr "" "用於進行鑑別的策略。如果 \"use_user_token\" 未生效,則可以指定鑑別策略。" #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "目標成員 %(member_id)s 已經與映像檔%(image_id)s 相關聯。" msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "管理使用者的承租人名稱。如果 \"use_user_token\" 未生效,則可以指定管理承租人" "名稱。" msgid "The type of task represented by this content" msgstr "此內容所表示的作業類型" msgid "The unique namespace text." msgstr "唯一的名稱空間文字。" msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "上限(累計快取的大小上限,以位元組為單位),超出該上限時,快取刪改程式(如果" "在執行中)將開始清除映像檔快取。" msgid "The user friendly name for the namespace. Used by UI if available." msgstr "對使用者更為友善的名稱空間名稱。如果有的話,則由使用者介面使用。" msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "要針對 Swift 鑑別服務進行鑑別的使用者(已淘汰)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "Socket 選項 TCP_KEEPIDLE 的值。這是 TCP開始傳送保持作用中探針之前連線必須閒置" "的秒數。" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "%(error_key_name)s %(error_filename)s 有問題。請驗證問題。錯誤:%(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "%(error_key_name)s %(error_filename)s 有問題。請驗證問題。OpenSSL 錯誤:" "%(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "金鑰組有問題。請確認憑證 %(cert_file)s 及金鑰 %(key_file)s 是配對的。OpenSSL " "錯誤 %(ce)s" msgid "There was an error configuring the client." 
msgstr "配置用戶端時發生錯誤。" msgid "There was an error connecting to a server" msgstr "連接至伺服器時發生錯誤" msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "此配置值指示在內容保護檔案中使用的是 \"roles\" 還是 \"policies\"。" msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "目前不允許對 Glance 作業執行這項作業。根據它們的 expires_at內容,將在達到時間" "之後自動刪除它們。" msgid "This operation is currently not permitted on Glance images details." msgstr "目前不允許對 Glance 映像檔詳細資料執行這項作業。" msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "此值設定將用來判斷映像檔位置順序的策略。目前,Glance 隨附了兩個策略 " "'location_order' 及 'store_type'。" msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "作業在成功或失敗後存活的時間(小時)" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "用戶端連線的 Socket 作業逾時。如果送入的連線處於閒置的時間達到此秒數,則會將" "其關閉。值 '0' 表示永久等待。" msgid "Too few arguments." msgstr "引數太少。" msgid "Too few locations" msgstr "位置太少" msgid "Too many locations" msgstr "位置太多" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "大小總計為 %(size)d 個位元組,其包含映像檔數目為 %(img_count)d" msgid "Turn on/off delayed delete." 
msgstr "開啟/關閉延遲刪除。" msgid "Type version has to be a valid semver string" msgstr "類型版本必須是有效的 semver 字串" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI 中不能多次出現某一架構。如果所指定的 URI 類似於 swift://user:pass@http://" "authurl.com/v1/container/obj,則需要將其變更成使用 swift+http:// 架構,例如:" "swift+http://user:pass@authurl.com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "用來存取外部儲存庫中所保留之映像檔的 URL" msgid "Unable to PUT to non-empty collection" msgstr "無法對非空集合執行 PUT 作業" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "無法建立 PID 檔案 %(pid)s。要以非 root 使用者身分執行嗎?\n" "正在撤回而使用暫存檔,您可以使用下列指令來停止 %(service)s 服務:\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "無法依不明運算子 '%s' 進行過濾。" msgid "Unable to filter on a range with a non-numeric value." msgstr "無法對包含非數值的範圍進行過濾。" msgid "Unable to filter on a unknown operator." msgstr "無法依不明運算子進行過濾。" msgid "Unable to filter using the specified operator." msgstr "無法使用指定的運算子進行過濾。" msgid "Unable to filter using the specified range." msgstr "無法使用指定的範圍進行過濾。" #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "在「JSON 綱目」變更中找不到 '%s'" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "在 JSON 綱目變更中找不到 `op`。它必須是下列其中一項:%(available)s。" msgid "Unable to get legacy image" msgstr "無法取得舊式映像檔" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "無法增加檔案描述子限制。要以非 root 使用者身分執行嗎?" 
#, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "無法從配置檔 %(conf_file)s 載入 %(app_name)s。\n" "發生錯誤:%(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "無法載入綱目:%(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "找不到 %s 的 paste 配置檔。" msgid "Unable to modify collection in immutable or readonly property" msgstr "無法修改不可變內容或唯讀內容中的集合" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "無法擷取 ID 為 %s 的憑證" msgid "Unable to retrieve request id from context" msgstr "無法從環境定義中擷取要求 ID" msgid "Unable to specify artifact type explicitly" msgstr "無法明確指定構件類型" msgid "Unable to specify artifact type version explicitly" msgstr "無法明確指定構件類型版本" msgid "Unable to specify version if multiple types are possible" msgstr "如果可能存在多個類型,則無法指定版本" msgid "Unable to specify version if type is not specified" msgstr "如果未指定類型,則無法指定版本" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "無法上傳映像檔 %(image_id)s 的重複映像檔資料:%(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "無法驗證簽章,因為該算法在此系統上不受支援" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "無法驗證簽章:%(reason)s" msgid "Unauthorized image access" msgstr "未獲授權的映像檔存取" msgid "Unexpected body type. Expected list/dict." msgstr "非預期的內文類型。預期為清單/字典。" #, python-format msgid "Unexpected response: %s" msgstr "非預期的回應:%s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "不明的鑑別策略 '%s'" #, python-format msgid "Unknown command: %s" msgstr "不明指令:%s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "不明的排序方向,必須為 'desc' 或 'asc'" msgid "Unrecognized JSON Schema draft version" msgstr "無法辨識的「JSON 綱目」草稿版本" msgid "Unrecognized changes-since value" msgstr "無法辨識 changes-since 值" #, python-format msgid "Unsupported sort_dir. 
Acceptable values: %s" msgstr "不支援的 sort_dir。可接受的值:%s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "不支援的 sort_key。可接受的值:%s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "值 %(value)d 已超出範圍,不得超過 %(max)d" msgid "Value is greater than maximum" msgstr "值大於上限" msgid "Value is less than minimum" msgstr "值小於下限" msgid "Value is required" msgstr "需要值" #, python-format msgid "Version component is too large (%d max)" msgstr "版本元件太大(上限為 %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "版本無效:%(reason)s" msgid "Virtual size of image in bytes" msgstr "映像檔的虛擬大小(以位元組為單位)" msgid "Visibility must be either \"public\" or \"private\"" msgstr "可見性必須是 \"public\" 或 \"private\"" #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "等待 PID %(pid)s (%(file)s) 當掉已達到 15 秒;正在放棄" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "如果為 false,則無論 available_plugins 為何,均無法載入任何構件。如果為 " "true,則可以載入構件。" msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "在 SSL 模式下執行伺服器時,必須在配置檔中指定 cert_file 及 key_file 選項值" msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "如果為 True,則此選項會將映像檔的擁有者設為承租人。否則,映像檔的擁有者將會是" "發出要求的已鑑別使用者。" msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." 
msgstr "" "如果在登錄伺服器的連線中使用 SSL,則不需要透過憑證管理中心進行驗證。此登錄相" "當於對 API 使用 glanceclient 時,在指令行上指定 --insecure。" msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "是否要容許使用者指定超出映像檔綱目所提供內容的映像檔內容" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "是否要將後端映像檔位置包含在映像檔內容中。例如,如果使用檔案系統儲存庫,則會" "在 'direct_url' meta 資料欄位中將 URL \"file:///path/to/image\" 傳回給使用" "者。顯示儲存位置可能會導致安全風險,因此請小心使用此設定!將此項設為 true 將" "置換 show_image_direct_url 選項。" msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "是否要將後端映像檔儲存體位置包含在映像檔內容中。顯示儲存體位置可能會導致安全" "風險,因此請小心使用此設定!" msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "是否要在向登錄發出要求時,透過包含使用者及承租人資訊的標頭來傳遞。這容許登錄" "在不具有 keystonemiddleware 的 auth_token 中介軟體時使用環境定義中介軟體,從" "而移除 keystone 鑑別服務呼叫。建議在使用此選項時,透過使用 auth_token 中介軟" "體之外的中介軟體,來保護 Glance API 和 Glance 登錄之間的通訊。" msgid "" "Whether to pass through the user token when making requests to the registry. 
" "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "是否要在向登錄發出要求時透過使用者記號來傳遞。如果要在上傳大型檔案期間防止與" "記號有效期限相關的失敗,建議將此參數設定為 False。如果 \"use_user_token\" 未" "生效,則可以指定管理認證。" msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "非同步作業的工作目錄。這裡所設定的目錄將用來對映像檔進行操作 - 一般在將映像檔" "匯入目的地儲存庫之前進行。提供工作目錄時,請確保提供足夠的空間,以讓並行作業" "能夠有效執行而不會出現空間不足狀況。可以將 'max_workers' 的數目或執行中工作程" "式的數目與平均映像檔大小(例如 500MB)相乘來粗略估計所需空間。應該根據部署的" "平均大小來估計映像檔大小。請注意,視執行中的作業而定,您可能需要根據作業的目" "的將此數目與某個因數相乘。例如,如果已啟用映像檔轉換,則您可能想要將可用大小" "加倍。話雖如此,但請注意,這些僅是估計,且您應該根據最差的情況來進行估計,並" "且準備好萬一估計錯誤也能有應對措施。" #, python-format msgid "Wrong command structure: %s" msgstr "錯誤的指令結構:%s" msgid "You are not authenticated." msgstr "您沒有進行鑑別。" msgid "You are not authorized to complete this action." msgstr "您未獲授權來完成此動作。" #, python-format msgid "You are not authorized to lookup image %s." msgstr "您未獲授權來查閱映像檔 %s。" #, python-format msgid "You are not authorized to lookup the members of the image %s." 
msgstr "您未獲授權來查閱映像檔 %s 的成員。" #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "不允許您在 '%s' 擁有的名稱空間中建立標籤" msgid "You are not permitted to create image members for the image." msgstr "不允許您給映像檔建立映像檔成員。" #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "不允許您建立擁有者為 '%s' 的映像檔。" #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "不允許您建立擁有者為 '%s' 的名稱空間" #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "不允許您建立擁有者為 '%s' 的物件" #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "不允許您建立擁有者為 '%s' 的內容" #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "不允許您建立擁有者為 '%s' 的 resource_type" #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "不允許您以擁有者身分來建立此作業:%s" msgid "You are not permitted to deactivate this image." msgstr "不允許您取消啟動此映像檔。" msgid "You are not permitted to delete this image." msgstr "不允許您刪除此映像檔。" msgid "You are not permitted to delete this meta_resource_type." msgstr "不允許您刪除此 meta_resource_type。" msgid "You are not permitted to delete this namespace." msgstr "不允許您刪除此名稱空間。" msgid "You are not permitted to delete this object." msgstr "不允許您刪除此物件。" msgid "You are not permitted to delete this property." msgstr "不允許您刪除此內容。" msgid "You are not permitted to delete this tag." msgstr "不允許您刪除此標籤。" #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "不允許您修改此 %(resource)s 上的 '%(attr)s'。" #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "不允許您修改此映像檔上的 '%s'。" msgid "You are not permitted to modify locations for this image." msgstr "不允許您修改此映像檔的位置。" msgid "You are not permitted to modify tags on this image." msgstr "不允許您修改此映像檔上的標籤。" msgid "You are not permitted to modify this image." 
msgstr "不允許您修改此映像檔。" msgid "You are not permitted to reactivate this image." msgstr "不允許您重新啟動此映像檔。" msgid "You are not permitted to set status on this task." msgstr "不允許您在此作業上設定狀態。" msgid "You are not permitted to update this namespace." msgstr "不允許您更新此名稱空間。" msgid "You are not permitted to update this object." msgstr "不允許您更新此物件。" msgid "You are not permitted to update this property." msgstr "不允許您更新此內容。" msgid "You are not permitted to update this tag." msgstr "不允許您更新此標籤。" msgid "You are not permitted to upload data for this image." msgstr "不允許您給此映像檔上傳資料。" #, python-format msgid "You cannot add image member for %s" msgstr "無法給 %s 新增映像檔成員" #, python-format msgid "You cannot delete image member for %s" msgstr "無法刪除 %s 的映像檔成員" #, python-format msgid "You cannot get image member for %s" msgstr "無法取得 %s 的映像檔成員" #, python-format msgid "You cannot update image member %s" msgstr "無法更新映像檔成員 %s" msgid "You do not own this image" msgstr "您不是此映像檔的擁有者" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "您已選取在連接時使用 SSL,並且提供了憑證,但未提供 key_file 參數,也沒有設定 " "GLANCE_CLIENT_KEY_FILE 環境變數" msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "您已選取在連接時使用 SSL,並且提供了金鑰,但未提供 cert_file 參數,也沒有設" "定 GLANCE_CLIENT_CERT_FILE 環境變數" msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__() 取得非預期的關鍵字引數 '%s'" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "更新時無法從 
%(current)s 轉移至 %(next)s(需要 from_state = %(from)s)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "自訂內容 (%(props)s) 與基本內容相衝突" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "此平台上無法使用 eventlet 'poll' 及 'selects' 中心。" msgid "is_public must be None, True, or False" msgstr "is_public 必須是 None、True 或 False" msgid "limit param must be an integer" msgstr "限制參數必須是整數" msgid "limit param must be positive" msgstr "限制參數必須是正數" #, python-format msgid "location: %s data lost" msgstr "位置:%s 資料已遺失" msgid "md5 hash of image contents." msgstr "映像檔內容的 md5 雜湊值。" #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image() 取得非預期的關鍵字 %s" msgid "protected must be True, or False" msgstr "protected 必須是 True 或 False" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "無法啟動 %(serv)s。取得錯誤:%(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id 太長,大小上限為 %s" glance-12.0.0/glance/locale/ko_KR/0000775000567000056710000000000012701407204017656 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000012701407204021443 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/locale/ko_KR/LC_MESSAGES/glance-log-warning.po0000664000567000056710000002313112701407051025456 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Mario Cho , 2014 # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 12:38+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "" "%(image_id)s: field %(key)s differs (source is %(master_value)s, destination " "is %(slave_value)s)" msgstr "" "%(image_id)s: 필드 %(key)s이(가) 다름(소스는 %(master_value)s, 대상은 " "%(slave_value)s)" msgid "" "/v3 controller is deprecated and will be removed from glance-api soon. " "Remove the reference to it from glance-api-paste.ini configuration file and " "use Glance Artifact Service API instead" msgstr "" "/v3 컨트롤러는 더 이상 사용되지 않으므로 glance-api에서 곧 제거됩니다. " "glance-api-paste.ini 구성 파일에서 참조를 제거하고 대신 Glance Artifact " "Service API를 사용하십시오." #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "이미지 %(id)s에 대한 액세스가 거부되었지만 '찾을 수 없음'이 리턴됨" #, python-format msgid "An optional task has failed, the failure was: %s" msgstr "옵션 작업이 실패했습니다. 
실패 내용: %s" #, python-format msgid "Artifact with id=%s is not accessible" msgstr "id=%s인 아티팩트에 액세스할 수 없음" #, python-format msgid "Artifact with id=%s not found" msgstr "id=%s인 아티팩트를 찾을 수 없음" msgid "Artifact with the specified type, name and version already exists" msgstr "지정된 유형, 이름, 버전을 사용하는 아티팩트가 이미 있음" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%d" msgstr "" "지정된 유형, 이름, 버전을 사용하는 아티팩트에 이미 직접 종속성= %d이(가) 있" "음" #, python-format msgid "" "Artifact with the specified type, name and versions already has the direct " "dependency=%s" msgstr "" "지정된 유형, 이름, 버전을 사용하는 아티팩트에 이미 직접 종속성=%s이(가) 있음" msgid "Attempted to modify image user did not own." msgstr "사용자가 소유하지 않은 이미지를 수정하려고 시도했습니다." #, python-format msgid "Cached image file '%s' doesn't exist, unable to delete" msgstr "캐시된 이미지 파일 '%s'이(가) 없음, 삭제할 수 없음" #, python-format msgid "Can't load artifact %s: load disabled in config" msgstr "아티팩트 %s을(를) 로드할 수 없음: 구성에서 로드가 비활성화됨" #, python-format msgid "Can't load artifact %s: not in available_plugins list" msgstr "아티팩트 %s을(를) 로드할 수 없음: available_plugins 목록에 없음" #, python-format msgid "Could not find image %s" msgstr "이미지 %s을(를) 찾을 수 없음" #, python-format msgid "" "Could not find schema properties file %s. Continuing without custom " "properties" msgstr "" "스키마 특성 파일 %s을(를) 찾을 수 없습니다. 사용자 지정 특성 없이 계속합니다." #, python-format msgid "Could not find task %s" msgstr "작업 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find task info %s" msgstr "작업 정보 %s을(를) 찾을 수 없음" msgid "Deadlock detected. Retrying..." msgstr "데드락 발견됨, 재시도중 " #, python-format msgid "Duplicate entry for values: %s" msgstr "중복된 엔트리 값: %s" #, python-format msgid "Expected table %(tbl)s was not found in DB." msgstr "예상 테이블 %(tbl)s을(를) DB에서 찾을 수 없습니다." #, python-format msgid "" "Failed to activate image %s in registry. About to delete image bits from " "store and update status to 'killed'." msgstr "" "레지스트리에서 이미지 %s을(를) 활성화하는 데 실패했습니다. 
곧 저장소에서 이미" "지 비트를 삭제하고 상태를 'killed'로 업데이트합니다." #, python-format msgid "Failed to decrypt location value for image %(image_id)s" msgstr "이미지 %(image_id)s의 위치 값을 암호 해제하는 데 실패" #, python-format msgid "Failed to delete blob %s in store from URI" msgstr "URI에서 저장소의 blob %s을(를) 삭제하는 데 실패" #, python-format msgid "Failed to delete file %(path)s. Got error: %(e)s" msgstr "파일 %(path)s을(를) 삭제하지 못했습니다. 오류 발생: %(e)s" #, python-format msgid "Failed to delete image %s in store from URI" msgstr "URI에서 저장소의 이미지 %s을(를) 삭제하는 데 실패" #, python-format msgid "Failed to find task %(task_id)s. Reason: %(reason)s" msgstr "작업 %(task_id)s을(를) 찾지 못했습니다. 이유: %(reason)s" msgid "Failed to successfully cache all images in queue." msgstr "대기열에서 모든 이미지를 성공적으로 캐시하지 못했습니다." #, python-format msgid "" "Fetch of cache file failed (%(e)s), rolling back by moving " "'%(incomplete_path)s' to '%(invalid_path)s'" msgstr "" "캐시 파일 가져오기 실패%(e)s), '%(incomplete_path)s'을(를) " "'%(invalid_path)s'(으)로 이동하여 롤백" #, python-format msgid "Forbidden to create task. Reason: %(reason)s" msgstr "작업 생성이 금지되었습니다. 이유: %(reason)s" #, python-format msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" msgstr "작업 %(task_id)s 가져오기가 금지되었습니다. 이유: %(reason)s" msgid "Id not in sort_keys; is sort_keys unique?" msgstr "ID가 sort_keys에 없습니다. sort_keys가 고유합니까?" #, python-format msgid "Image %s entirely missing from the destination" msgstr "이미지 %s이(가) 대상에서 완전히 누락되어 있음" #, python-format msgid "Image '%s' is not active. Not caching." msgstr "이미지 '%s'이(가) 활성이 아닙니다. 캐시되지 않습니다." #, python-format msgid "" "Image cache driver '%(driver_module)s' failed to configure. Got error: " "'%(config_err)s" msgstr "" "이미지 캐시 드라이버 '%(driver_module)s'을(를) 구성하지 못했습니다. 오류 발" "생: '%(config_err)s" #, python-format msgid "" "Image cache driver '%(driver_name)s' failed to load. Got error: " "'%(import_err)s." msgstr "" "이미지 캐시 드라이버 '%(driver_name)s'을(를) 로드하지 못했습니다. 오류 발생: " "'%(import_err)s." #, python-format msgid "Invalid marker. 
Image %(id)s could not be found." msgstr "올바르지 않은 마커입니다. 이미지 %(id)s을(를) 찾을 수 없습니다." #, python-format msgid "Invalid membership association specified for image %(id)s" msgstr "이미지 %(id)s에 대해 올바르지 않은 멤버십 연관이 지정됨" #, python-format msgid "Member %(id)s not found" msgstr "구성원 %(id)s 발견되지 않음" #, python-format msgid "Metadata tag not found for id %s" msgstr "ID %s에 대한 메타데이터 태그를 찾을 수 없음" #, python-format msgid "No metadata found for image '%s'" msgstr " image의 메어 자료가 없음 '%s'" #, python-format msgid "" "One or more image locations couldn't be scrubbed from backend. Leaving image " "'%s' in 'pending_delete' status" msgstr "" "백엔드에서 하나 이상의 이미지 위치를 삭제할 수 없습니다. 이미지 '%s'의 상태" "를 'pending_delete'로 남겨 둠" #, python-format msgid "Show level %s is not supported in this operation" msgstr "이 조작에서는 표시 레벨 %s이(가) 지원되지 않음" #, python-format msgid "Task %(task_id)s failed with exception %(error)s" msgstr "작업 %(task_id)s에 실패하고 예외 %(error)s 발생" msgid "The `eventlet` executor has been deprecated. Use `taskflow` instead." msgstr "" "`eventlet` 실행자는 더 이상 사용되지 않습니다. 대신 `taskflow`를 사용하십시" "오." #, python-format msgid "" "The conversion format is None, please add a value for it in the config file " "for this task to work: %s" msgstr "변환 포맷이 None임, 이 태스크를 통해 작업할 구성 파일에 값 추가: %s" msgid "Unable to get deleted image" msgstr "삭제된 이미지를 가져올 수 없음" #, python-format msgid "Unable to get deleted task %s" msgstr "삭제된 작업 %s을(를) 가져올 수 없음" msgid "Unable to get unowned image" msgstr "소유하지 않은 이미지를 가져올 수 없음" #, python-format msgid "Unrecognised child %s" msgstr "인식되지 않은 하위 %s" #, python-format msgid "" "User %(user)s attempted to upload an image of size %(size)d that will exceed " "the quota. %(remaining)d bytes remaining." msgstr "" "사용자 %(user)s이(가) 할당량을 초과하는 %(size)d 크기의 이미지를 업로드하려 " "했습니다. %(remaining)d바이트가 남아 있습니다." #, python-format msgid "" "User %(user)s attempted to upload an image of unknown size that will exceed " "the quota. %(remaining)d bytes remaining." 
msgstr "" "사용자 %(user)s이(가) 할당량을 초과하는 알 수 없는 크기의 이미지를 업로드하" "려 했습니다. %(remaining)d바이트가 남아 있습니다." #, python-format msgid "User lacks permission to share image %(id)s" msgstr "사용자가 이미지 %(id)s을(를) 공유할 권한이 없음" glance-12.0.0/glance/locale/ko_KR/LC_MESSAGES/glance-log-info.po0000664000567000056710000002372112701407051024751 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # Mario Cho , 2014 # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 12:09+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(task_id)s of %(task_type)s completed" msgstr "%(task_type)s 중 %(task_id)s이(가) 완료됨" msgid "" "'metadata_encryption_key' was not specified in the config file or a config " "file was not specified. This means that this migration is a NOOP." msgstr "" "'metadata_encryption_key'가 구성 파일에 지정되지 않았거나 구성 파일이 지정되" "지 않았습니다. 즉, 이 마이그레이션이 NOOP입니다." #, python-format msgid "Access denied to image %(id)s but returning 'not found'" msgstr "이미지 %(id)s에 대한 액세스가 거부되었지만 '찾을 수 없음'이 리턴됨" msgid "All workers have terminated. Exiting" msgstr "모든 작업자가 종료되었습니다. 종료" #, python-format msgid "Artifact %s has been successfully loaded" msgstr "아티팩트 %s이(가) 성공적으로 로드됨" #, python-format msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" msgstr "%(funcname)s 호출: args=%(args)s, kwargs=%(kwargs)s" msgid "Caught keyboard interrupt. Exiting." msgstr "키보드 인터럽트가 발견되었습니다. 종료합니다." 
#, python-format msgid "Child %d exiting normally" msgstr "하위 %d을(를) 정상적으로 종료" #, python-format msgid "Cleaning up %s after exceeding the quota" msgstr "할당량을 초과한 후 %s 정리" #, python-format msgid "Cleaning up %s after exceeding the quota." msgstr "할당량을 초과한 후 %s을(를) 정리합니다." #, python-format msgid "Considering: %s" msgstr "검토중: %s" #, python-format msgid "Could not find artifact %s" msgstr "아티팩트 %s을(를) 찾을 수 없음" msgid "Daemon Shutdown on KeyboardInterrupt" msgstr "KeyboardInterrupt에서 디먼 종료" msgid "Defaulting to SQLite driver." msgstr " SQLite 드라이버로 기본 구성." #, python-format msgid "Delete denied for public image %(id)s" msgstr "공용 이미지 %(id)s의 삭제가 거부됨" #, python-format msgid "Deleted %(rows)d row(s) from table %(tbl)s" msgstr "테이블 %(tbl)s에서 삭제된 %(rows)d 행" #, python-format msgid "File %s loaded to database." msgstr "데이터베이스에 파일 %s이(가) 로드되었습니다." #, python-format msgid "Image %(id)s not found" msgstr "%(id)s 이미지를 찾을 수 없음" #, python-format msgid "Image %s has been scrubbed successfully" msgstr "이미지 %s이(가) 성공적으로 삭제됨" #, python-format msgid "Image %s is being synced" msgstr "이미지 %s이(가) 동기화됨" #, python-format msgid "Image %s is deactivated" msgstr "%s 이미지가 비활성화됨" #, python-format msgid "Image %s is reactivated" msgstr "%s 이미지가 재활성화됨" #, python-format msgid "Image %s is scrubbed from a location." msgstr "위치에서 이미지 %s이(가) 삭제됩니다." #, python-format msgid "Image %s metadata has changed" msgstr "이미지 %s 메타데이터가 변경됨" #, python-format msgid "Image cache loaded driver '%s'." msgstr "이미지 캐시가 '%s' 드라이버를 로드했습니다." #, python-format msgid "" "Image location for image '%s' not found in backend; Marking image location " "deleted in db." msgstr "" "이미지 '%s'의 이미지 위치가 백엔드에 없음, db에서 이미지 위치를 삭제됨으로 표" "시." 
msgid "Initialized gzip middleware" msgstr "gzip 미들웨어로 초기화됨 " msgid "Initialized image cache management middleware" msgstr "초기화된 이미지 캐시 관리 미들웨어" msgid "Initialized image cache middleware" msgstr "초기화된 이미지 캐시 미들웨어" #, python-format msgid "Initializing scrubber with configuration: %s" msgstr "구성으로 삭제 프로그램(scrubber) 초기화: %s" #, python-format msgid "" "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" msgstr "" "%(task_type)s 유형의 task_id %(task_id)s에 대한 알려진 작업 스크립트 로드" msgid "Metadata loading finished" msgstr "메타데이터 로드 완료" #, python-format msgid "Namespace %(namespace)s saved in %(file)s" msgstr "%(file)s에 네임스페이스 %(namespace)s이(가) 저장됨" #, python-format msgid "Not queueing image '%s'. Already being written to cache" msgstr "이미지'%s'을(를) 대기열에 두지 않습니다. 이미 캐시에 기록 중입니다." #, python-format msgid "Not queueing image '%s'. Already cached." msgstr "이미지'%s'을(를) 대기열에 두지 않습니다. 이미 캐시되었습니다." #, python-format msgid "Not queueing image '%s'. Already queued." msgstr "이미지'%s'을(를) 대기열에 두지 않습니다. 이미 대기열에 있습니다." 
#, python-format msgid "Overwriting namespace %s" msgstr "네임스페이스 %s 겹쳐쓰기" #, python-format msgid "" "Purging deleted rows older than %(age_in_days)d day(s) from table %(tbl)s" msgstr "테이블 %(tbl)s에서 %(age_in_days)d일보다 오래된 삭제된 행 제거" #, python-format msgid "Reaped %(reaped)s %(entry_type)s cache entries" msgstr "획득한 %(reaped)s %(entry_type)s 캐시 항목" #, python-format msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" msgstr "올바르지 않은 이미지 id '%(bad_id)s'에 대한 이미지 생성 요청 거부" #, python-format msgid "Removed dead child %s" msgstr "사용하지 않는 하위 %s 제거" #, python-format msgid "Removed invalid cache file %s" msgstr "올바르지 않은 캐시 파일 %s 제거" #, python-format msgid "Removed stale child %s" msgstr "시간이 경과된 하위 %s 제거" #, python-format msgid "Removed stalled cache file %s" msgstr "시간이 경과된 캐시 파일 %s 제거" #, python-format msgid "Returning %(funcname)s: %(output)s" msgstr "%(funcname)s 리턴: %(output)s" #, python-format msgid "Scrubbing image %(id)s from %(count)d locations." msgstr "%(count)d 위치에서 이미지 %(id)s 삭제(scrub)" #, python-format msgid "Skipping namespace %s. It already exists in the database." msgstr "네임스페이스 %s을(를) 건너뜁니다. 이미 데이터베이스에 있습니다." 
#, python-format msgid "Started child %s" msgstr "하위 %s이(가) 시작됨" #, python-format msgid "Starting %d workers" msgstr "%d 작업자 시작 중" #, python-format msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgstr "디먼 시작: wakeup_time=%(wakeup_time)s threads=%(threads)s" msgid "Starting single process server" msgstr "단일 프로세스 서버 시작" #, python-format msgid "Storing: %s" msgstr "정렬중: %s" #, python-format msgid "Successfully cached all %d images" msgstr "모든 %d 이미지를 성공적으로 캐시함" #, python-format msgid "Successfully created image %(id)s" msgstr "이미지 %(id)s을(를) 성공적으로 생성" #, python-format msgid "Successfully deleted a membership from image %(id)s" msgstr "이미지 %(id)s의 멤버십이 성공적으로 삭제됨" #, python-format msgid "Successfully deleted image %(id)s" msgstr "이미지 %(id)s을(를) 성공적으로 삭제" #, python-format msgid "Successfully updated a membership for image %(id)s" msgstr "이미지 %(id)s의 멤버십이 성공적으로 업데이트됨" #, python-format msgid "Successfully updated memberships for image %(id)s" msgstr "이미지 %(id)s의 멤버십이 성공적으로 업데이트됨" #, python-format msgid "Successfully verified signature for image %s" msgstr "이미지 %s의 서명 검증 성공" #, python-format msgid "Table %s has been cleared" msgstr "테이블 %s을(를) 지움" #, python-format msgid "Task %(task_id)s beginning import execution." msgstr "가져오기 실행을 시작하는 작업 %(task_id)s." #, python-format msgid "Task %(task_id)s: Could not import image file %(image_data)s" msgstr "작업 %(task_id)s: 이미지 파일 %(image_data)s을(를) 가져올 수 없음" #, python-format msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" msgstr "작업 %(task_id)s: 가져올 이미지 데이터 uri %(data_uri)s 확보" #, python-format msgid "" "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" msgstr "작업 [%(task_id)s]상태가 %(cur_status)s에서 %(new_status)s(으)로 변경" msgid "Triggering asynchronous copy from external source" msgstr "외부 소스에서 비동기 복사 트리거" #, python-format msgid "Unable to create trust: %s Use the existing user token." msgstr "트러스트를 생성할 수 없음: %s 기존 사용자 토큰을 사용하십시오." 
#, python-format msgid "Unable to delete trust %(trust)s: %(msg)s" msgstr "트러스트 %(trust)s을(를) 삭제할 수 없음: %(msg)s" msgid "Unable to get deleted image" msgstr "삭제된 이미지를 가져올 수 없음" #, python-format msgid "Update denied for public image %(id)s" msgstr "공용 이미지 %(id)s의 업데이트가 거부됨" #, python-format msgid "Updating metadata for image %(id)s" msgstr "이미지 %(id)s의 메타데이터 업데이트" #, python-format msgid "Uploaded data of image %s from request payload successfully." msgstr "요청 페이로드에서 이미지 %s의 데이터를 성공적으로 업로드했습니다." #, python-format msgid "creating table %(table)s" msgstr "테이블 %(table)s 생성" #, python-format msgid "dropping table %(table)s" msgstr "테이블 %(table)s 삭제" glance-12.0.0/glance/locale/ko_KR/LC_MESSAGES/glance-log-error.po0000664000567000056710000003147012701407051025147 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # HyunWoo Jo , 2014 # OpenStack Infra , 2015. #zanata # SeYeon Lee , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 11:09+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "An error occurred during image.send notification: %(err)s" msgstr "image.send notification 중 에러 발생 : %(err)s" #, python-format msgid "" "An error occurred reading from backend storage for image %(image_id)s: " "%(err)s" msgstr "백엔드 스토리지에서 이미지 %(image_id)s를 읽는 중 에러 발생: %(err)s" #, python-format msgid "" "Backend storage for image %(image_id)s disconnected after writing only " "%(bytes_written)d bytes" msgstr "" "백엔드 스토리지에 이미지 %(image_id)s 를 %(bytes_written)d byte 를 쓰는 도중 " "연결 끊김" #, python-format msgid "Can not get scrub jobs from queue: %s" msgstr "대기열에서 삭제(scrub) 작업을 가져올 수 없음: %s" msgid "Cannot use this parameter with the operator IN" msgstr "연산자 IN과 이 매개변수를 사용할 수 없음" #, python-format msgid "Caught error: %s" msgstr "오류 발견: %s" msgid "Checksum header is missing." msgstr "Checksum header 없음" #, python-format msgid "Copy from external source '%(scheme)s' failed for image: %(image)s" msgstr "외부 소스 '%(scheme)s' 로부터 이미지 복사 실패 : %(image)s" #, python-format msgid "Could not load plugin from %(module)s" msgstr "%(module)s에서 플러그인을 로드할 수 없음" #, python-format msgid "Couldn't find property protection file %(file)s: %(error)s." msgstr "특성 보호 파일 %(file)s을(를) 찾을 수 없음: %(error)s." #, python-format msgid "Encountered a malformed property protection rule %(rule)s: %(error)s." msgstr "형식이 잘못된 특성 보호 규칙 %(rule)s이(가) 발생함: %(error)s." #, python-format msgid "Error executing SQLite call. 
Got error: %s" msgstr "SQLLite call 수행중 에러 발생 : %s" msgid "" "Error setting up the debug environment. Verify that the option " "pydev_worker_debug_host is pointing to a valid hostname or IP on which a " "pydev server is listening on the port indicated by pydev_worker_debug_port." msgstr "" "디버그 환경을 설정하는 중 오류가 발생했습니다. pydev_worker_debug_host 옵션" "이 pydev_worker_debug_port로 표시된 포트에서 pydev 서버가 청취 중인 올바른 호" "스트 이름 또는 IP를 가르키는지 확인하십시오." #, python-format msgid "Error: cooperative_iter exception %s" msgstr "오류: cooperative_iter 예외 %s" #, python-format msgid "" "Exception encountered while tee'ing image '%(image_id)s' into cache: " "%(error)s. Continuing with response." msgstr "" "이미지 '%(image_id)s'를 캐시에 올리는 중 Exception 발생: %(error)s. 응답과 함" "께 진행" #, python-format msgid "Failed to delete blob %(blob_id)s from store: %(exc)s" msgstr "저장소에서 blob %(blob_id)s을(를) 삭제하는 데 실패: %(exc)s" #, python-format msgid "Failed to delete image %(image_id)s from store: %(exc)s" msgstr "스토어에서 이미지 %(image_id)s 삭제 실패 : %(exc)s" #, python-format msgid "Failed to delete namespace %(namespace)s " msgstr "네임스페이스 삭제 실패 %(namespace)s" #, python-format msgid "Failed to execute introspection %(task_id)s: %(exc)s" msgstr "" "인트로스펙션(introspection) %(task_id)s을(를) 실행하는 데 실패: %(exc)s" #, python-format msgid "Failed to execute security checks on the image %(task_id)s: %(exc)s" msgstr "이미지 %(task_id)s에서 보안 확인을 실행하는 데 실패: %(exc)s" #, python-format msgid "Failed to execute task %(task_id)s: %(exc)s" msgstr "작업 %(task_id)s을(를) 실행하는 데 실패: %(exc)s" #, python-format msgid "Failed to load location strategy module %(module)s: %(e)s" msgstr "위치 전략 모듈 %(module)s을(를) 로드하는 데 실패: %(e)s" #, python-format msgid "Failed to load the %s executor provided in the config." 
msgstr "설정에서 제공된 %s executor 로드 실패" #, python-format msgid "" "Failed to parse json file %(file_path)s while populating metadata due to: " "%(error_msg)s" msgstr "" "다음 오류로 인해 메타데이터를 채우는 동안 json 파일 %(file_path)s을(를) 구문 " "분석하는 데 실패: %(error_msg)s" #, python-format msgid "Failed to save task %(task_id)s in DB as task_repo is %(task_repo)s" msgstr "" "DB에 task_repo 형태로 작업 %(task_id)s 저장에 실패한 것은 %(task_repo)s" msgid "Failed to upload artifact blob data due to HTTP error" msgstr "HTTP 오류로 인해 아티팩트 blob 데이터를 업로드하는 데 실패" msgid "Failed to upload artifact blob data due to internal error" msgstr "내부 오류로 인해 아티팩트 blob 데이터를 업로드하는 데 실패" msgid "Failed to upload image data due to HTTP error" msgstr "HTTP 에러로 인해 이미지 데이터 업로드 실패" msgid "Failed to upload image data due to internal error" msgstr "내부 에러로 인해 이미지 데이터 업로드 실패" #, python-format msgid "Forbidden to create resource type. Reason: %(reason)s" msgstr "리소스타입 생성이 금지됨. 이유: %(reason)s" #, python-format msgid "" "Glance tried all active locations to get data for blob %s but all have " "failed." msgstr "" "Glance가 모든 활성 위치에서 blob %s의 데이터를 가져오려 했지만 모두 실패했습" "니다." #, python-format msgid "" "Glance tried all active locations to get data for image %s but all have " "failed." msgstr "" "Glance는 모든 활성화 된 위치에서 %s 이미지에 대한 데이터를 수집하려 했으나 모" "두 실패했습니다." #, python-format msgid "" "Image cache contained image file for image '%s', however the registry did " "not contain metadata for that image!" msgstr "" "이미지 캐시에는 이미지 '%s'에 대한 이미지 파일이 있으나 레지스트리는 이미지" "에 대한 메타데이터를 가지고 있지 않습니다!" msgid "Internal error occurred while trying to process task." msgstr "작업을 수행하는 중 내부에러 발생" msgid "Invalid format of swift store config cfg" msgstr "올바르지 않은 형식의 swift 저장소 구성 cfg" #, python-format msgid "Invalid store uri for image: %(image_id)s. Details: %(reason)s" msgstr "이미지 %(image_id)s 에 부적합한 저장 uri. 상세: %(reason)s" #, python-format msgid "" "Invalid value '%s' for 'property_protection_rule_format'. 
The permitted " "values are 'roles' and 'policies'" msgstr "" "'property_protection_rule_format'의 값 '%s'이(가) 올바르지 않습니다. 허용되" "는 값은 'roles' 및 'policies'입니다." #, python-format msgid "Invalid value for age, %(age)d" msgstr "기간에 올바르지 않은 값, %(age)d" #, python-format msgid "Invalid value for max_rows, %(max_rows)d" msgstr "max_rows에 올바르지 않은 값, %(max_rows)d" #, python-format msgid "Invalid value for option user_storage_quota: %(users_quota)s" msgstr "user_storage_quota 옵션에 올바르지 않은 값: %(users_quota)s" #, python-format msgid "Json schema files not found in %s. Aborting." msgstr "%s에서 Json 스키마 파일을 찾을 수 없음. 중단." #, python-format msgid "" "Malformed property protection rule in [%(prop)s] %(op)s=%(perm)s: '@' and " "'!' are mutually exclusive" msgstr "" "[%(prop)s] %(op)s=%(perm)s의 형식이 잘못된 특성 보호 규칙: '@' 및 '!'는 상호 " "배타적임" #, python-format msgid "" "Multiple policies '%s' not allowed for a given operation. Policies can be " "combined in the policy file" msgstr "" "지정된 조작에는 여러 정책 '%s'이(가) 허용되지 않습니다. 정책은 정책 파일에서 " "결합할 수 있습니다." 
#, python-format msgid "Not respawning child %d, cannot recover from termination" msgstr "하위 %d을(를) 다시 복제하지 않음, 종료에서 복구할 수 없음" #, python-format msgid "Operator %s is not supported" msgstr " %s 연산자는 지원되지 않음" msgid "" "Please provide no more than one option from this list: --prefer_new, --" "overwrite" msgstr "" "이 목록에서 하나 이하의 옵션을 제공하십시오.: --prefer_new, --overwrite" #, python-format msgid "" "RPC Call Error: %(val)s\n" "%(tb)s" msgstr "" "RPC 호출 오류: %(val)s\n" "%(tb)s" #, python-format msgid "Received HTTP error while uploading image %s" msgstr "이미지 %s를 업로드 하는 도중 HTTP 에러 수신" #, python-format msgid "Registry client request %(method)s %(action)s raised %(exc_name)s" msgstr "" "레지스트리 클라이언트 요청 %(method)s %(action)s에서 %(exc_name)s이(가) 제기" "됨" #, python-format msgid "Signature verification failed for image %(id)s: %(e)s" msgstr "이미지 %(id)s의 서명 검증에 실패: %(e)s" #, python-format msgid "Task ID %s" msgstr "작업 ID %s" #, python-format msgid "" "Task [%(task_id)s] status failed to change from %(cur_status)s to " "%(new_status)s" msgstr "" "작업 [%(task_id)s]상태를 %(cur_status)s에서 %(new_status)s(으)로 변경하는 데 " "실패" #, python-format msgid "Task not found for task_id %s" msgstr "task_id %s에 대한 작업을 찾을 수 없음" #, python-format msgid "" "Task: %(task_id)s failed to import image %(image_id)s to the filesystem." msgstr "" "작업: %(task_id)s에서 파일 시스템에 이미지 %(image_id)s을(를) 가져오는 데 실" "패했습니다." msgid "" "This execution of Tasks is not setup. Please consult the project " "documentation for more information on the executors available." msgstr "" "이 작업의 실행은 설정되지 않았습니다. 프로젝트 문서의 executors available 관" "련 항목을 참고하여 상세한 정보를 확인 하시기 바랍니다." #, python-format msgid "" "This task type %(task_type)s is not supported by the current deployment of " "Glance. Please refer the documentation provided by OpenStack or your " "operator for more information." msgstr "" "이 작업 유형 %(task_type)s는 현재 배포된 Glance에서 지원되지 않습니다. 상세 " "정보는 오픈스택이나 운영자로부터 배포된 문서를 참조바랍니다." 
msgid "" "To use --prefer_new or --overwrite you need to combine of these options with " "--merge option." msgstr "" "--prefer_new 또는 --overwrite를 사용하려면 --merge 옵션과 이 옵션을 결합해야 " "합니다." #, python-format msgid "Unable to create image %s" msgstr "이미지 %s를 생성할 수 없음" #, python-format msgid "Unable to create verifier since algorithm is unsupported: %(e)s" msgstr "알고리즘이 지원되지 않으므로 확인자를 생성할 수 없음: %(e)s" #, python-format msgid "Unable to delete image %(image_id)s: %(e)s" msgstr "이미지 %(image_id)s을(를) 삭제할 수 없음 : %(e)s" #, python-format msgid "Unable to delete image %s" msgstr "이미지 %s를 삭제할 수 없음" msgid "Unable to get images" msgstr "이미지를 가져올 수 없음" #, python-format msgid "Unable to kill image %(id)s: " msgstr "이미지 %(id)s 를 kill 할 수 없음:" #, python-format msgid "Unable to load artifacts: %s" msgstr "아티팩트를 로드할 수 없음: %s" #, python-format msgid "Unable to restore artifact %(artifact_id)s: %(e)s" msgstr "아티팩트 %(artifact_id)s을(를) 복원할 수 없음: %(e)s" #, python-format msgid "Unable to restore image %(image_id)s: %(e)s" msgstr "이미지 %(image_id)s를 복구할 수 없음 : %(e)s" #, python-format msgid "Unable to retrieve certificate with ID %(id)s: %(e)s" msgstr "ID가 %(id)s인 인증서를 검색할 수 없음: %(e)s" #, python-format msgid "Unable to scrub image %(id)s from a location. Reason: %(exc)s " msgstr "위치에서 %(id)s 이미지를 삭제할 수 없습니다. 이유: %(exc)s " #, python-format msgid "Unable to show image %s" msgstr "이미지 %s를 보여줄 수 없음" #, python-format msgid "Unable to update image %s" msgstr "이미지 %s를 업데이트 할 수 없음" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" #, python-format msgid "could not find %s" msgstr "%s 를 찾을 수 없음" #, python-format msgid "swift config file %(conf_file)s:%(exc)s not found" msgstr "swift 구성 파일 %(conf_file)s:%(exc)s을(를) 찾을 수 없음" glance-12.0.0/glance/locale/ko_KR/LC_MESSAGES/glance.po0000664000567000056710000034770212701407051023251 0ustar jenkinsjenkins00000000000000# Translations template for glance. 
# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the glance project. # # Translators: # HyunWoo Jo , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Ian Y. Choi , 2016. #zanata # Lucas Palm , 2016. #zanata # SeYeon Lee , 2016. #zanata # Sungjin Kang , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0rc2.dev6\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-24 10:40+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-24 10:42+0000\n" "Last-Translator: SeYeon Lee \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "%(attribute)s have to be string" msgstr "%(attribute)s 속성은 문자열이어야 함" #, python-format msgid "%(attribute)s is required" msgstr "%(attribute)s 속성이 필요함" #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "%(attribute)s의 길이는 %(length)i 이하여야 함" #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "%(attribute)s의 길이는 %(length)i 이상이어야 함" #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "%(attribute)s은(는) 패턴 %(pattern)s과(와) 일치해야 함" #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "%(cls)s 예외가 마지막 rpc 호출에서 발생: %(val)s" #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "이미지 %(i_id)s의 멤버 목록에서 %(m_id)s을(를) 찾을 수 없습니다." #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "%(serv)s(pid %(pid)s)이(가) 실행 중..." #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "%(serv)s이(가) 이미 실행 중으로 표시됨: %(pid)s" #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being used." 
msgstr "" "%(strategy)s이(가) 모듈로 두 번 등록되었습니다. %(module)s이(가) 사용되지 사" "용됩니다." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" "%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 파일 시스템 저장소" "를 로드할 수 없습니다." #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" "%(task_type)s의 %(task_id)s가 제대로 구성되지 않았습니다. 누락 작업 디렉토" "리: %(work_dir)s" #, python-format msgid "%(verb)sing %(serv)s" msgstr "%(serv)s을(를) %(verb)s 중" #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "%(serv)s에서 %(conf)s과(와) 함께 %(verb)s 중" #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in brackets " "separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" "%s 호스트:포트 쌍을 지정하십시오. 여기서 호스트는 IPv4 주소, IPv6 주소, 호스" "트 이름 또는 FQDN입니다. IPv6 주소를 사용하는 경우에는 포트와 분리하여 대괄호" "로 묶으십시오(예: \"[fe80::a:b:c]:9876\")." #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "%s에는 4바이트 유니코드 문자를 포함할 수 없습니다." #, python-format msgid "%s is already stopped" msgstr "%s이(가) 이미 중지되었습니다." #, python-format msgid "%s is stopped" msgstr "%s이(가) 중지됨" #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "'%(param)s' 값이 범위를 벗어남, %(max)d을(를) 초과해서는 안 됨" msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" "키스톤 인증 전략이 사용될 경우 --os_auth_url 옵션 또는 OS_AUTH_URL 환경 변수" "가 필요합니다.\n" msgid "A body is not expected with this request." msgstr "이 요청에는 본문이 없어야 합니다." msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" "형식 이름 또는 이름-버전에 허용되는 아티팩트 목록입니다.목록이 비어 있으면 모" "든 아티팩트를 로드할 수 있습니다." 
#, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(object_name)s인 메타데이터 정의 오브젝트가 namespace=" "%(namespace_name)s에서 찾을 수 없습니다." #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" "name=%(property_name)s인 메타데이터 정의 특성이 namespace=%(namespace_name)s" "에 이미 존재합니다." #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s already " "exists." msgstr "" "name=%(resource_type_name)s인 메타데이터 정의 자원 유형이 이미 존재합니다." #, python-format msgid "" "A metadata tag with name=%(name)s already exists in namespace=" "%(namespace_name)s." msgstr "" "name=%(name)s인 메타데이터 태그가 이미 namespace=%(namespace_name)s에서 찾을 " "수 없습니다." msgid "A set of URLs to access the image file kept in external store" msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL 세트" msgid "" "AES key for encrypting store 'location' metadata. This includes, if used, " "Swift or S3 credentials. Should be set to a random string of length 16, 24 " "or 32 bytes" msgstr "" "암호화 저장소 '위치' 메타데이터의 AES 키. 이를 사용하는 경우스위프트 또는 S3 " "신임 정보를 포함합니다. 길이 16, 24 또는 32바이트자의 랜덤 문자열로 설정해야 " "합니다." msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "서버를 바인드할 주소. 특정 네트워크 인터페이스를 선택할 때 유용합니다." msgid "Address to find the registry server." msgstr "레지스트리 서버를 찾기 위한 주소입니다." msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" "인증되지 않은 사용자가 읽기 전용 권한으로 API에 액세스하도록 허용합니다. " "ContextMiddleware를 사용하는 경우에만 적용됩니다." #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "허용되는 값 %s은(는) 지정된 유효성 검증기에서 올바르지 않음" msgid "Amount of disk space (in GB) required to boot image." msgstr "이미지를 부팅하는 데 필요한 디스크 공간의 양(MB)" msgid "Amount of ram (in MB) required to boot image." 
msgstr "이미지를 부팅하는 데 필요한 RAM의 양(MB)" msgid "An identifier for the image" msgstr "이미지에 대한 ID" msgid "An identifier for the image member (tenantId)" msgstr "이미지 멤버에 대한 ID(tenantId)" msgid "An identifier for the owner of this task" msgstr "이 태스크 소유자의 ID" msgid "An identifier for the task" msgstr "태스크의 ID" msgid "An image file url" msgstr "이미지 파일 url" msgid "An image schema url" msgstr "이미지 스키마 url" msgid "An image self url" msgstr "이미지 자체 url" #, python-format msgid "An image with identifier %s already exists" msgstr "ID가 %s인 이미지가 이미 존재함" msgid "An import task exception occurred" msgstr "가져오기 작업 예외 발생" msgid "An object with the same identifier already exists." msgstr "동일한 ID를 갖는 오브젝트가 이미 존재합니다. " msgid "An object with the same identifier is currently being operated on." msgstr "동일한 ID가 있는 오브젝트가 현재 작동됩니다." msgid "An object with the specified identifier was not found." msgstr "지정된 ID를 갖는 오브젝트를 찾을 수 없습니다." msgid "An unknown exception occurred" msgstr "알 수 없는 예외가 발생했음" msgid "An unknown task exception occurred" msgstr "알 수 없는 태스크 예외 발생" #, python-format msgid "Array has no element at position %d" msgstr "배열의 위치 %d에 요소가 없음" msgid "Array property can't have item_type=Array" msgstr "배열 특성에 item_type=Array를 사용할 수 없음" #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "사용 중이므로 아티팩트 %s을(를) 삭제할 수 없음: %s" #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "아티팩트의 상태를 %(source)s에서 %(target)s(으)로 변경할 수 없음" #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "아티팩트가 스토리지 할당량을 초과함: %s" #, python-format msgid "Artifact has no property %(prop)s" msgstr "아티팩트에 특성 %(prop)s이(가) 없음" #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "아티팩트 상태를 %(curr)s에서 %(to)s(으)로 변경할 수 없음" #, python-format msgid "Artifact storage media is full: %s" msgstr "아티팩트 스토리지 매체가 가득 참: %s" #, python-format msgid "" "Artifact type with name '%(name)s' and version '%(version)s' is not 
known" msgstr "" "이름이 '%(name)s'(이)며 버전이 '%(version)s'인 아티팩트 유형이 알 수 없는 유" "형임" msgid "Artifact with a circular dependency can not be created" msgstr "순환 종속성을 가진 아티팩트를 작성할 수 없음" #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "ID가 %(id)s인 아티팩트에 액세스할 수 없음" #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "ID가 %(id)s인 아티팩트를 찾을 수 없음" msgid "Artifact with the specified type, name and version already exists" msgstr "지정된 유형, 이름, 버전을 사용하는 아티팩트가 이미 있음" #, python-format msgid "" "Artifact with the specified type, name and version already has the direct " "dependency=%(dep)s" msgstr "" "지정된 유형, 이름, 버전을 사용하는 아티팩트에 이미 직접 종속성 %(dep)s이" "(가) 있음" #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" "지정된 유형, 이름, 버전을 사용하는 아티팩트에 상태 전이 종속성 %(dep)s이(가) " "있음" msgid "Attempt to set readonly property" msgstr "읽기 전용 특성 설정 시도" msgid "Attempt to set value of immutable property" msgstr "불변 특성 값 설정 시도" #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "중복 이미지를 업로드하려고 시도 중: %s" msgid "Attempted to update Location field for an image not in queued status." msgstr "" "큐에 들어간 상태에 있지 않은 이미지에 대한 위치 필드를 업데이트하려고 시도함" #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "'%(property)s' 속성은 읽기 전용입니다." #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "'%(property)s' 속성은 예약되어 있습니다." #, python-format msgid "Attribute '%s' is read-only." msgstr "'%s' 속성은 읽기 전용입니다." #, python-format msgid "Attribute '%s' is reserved." msgstr "'%s' 속성은 예약되어 있습니다." msgid "Attribute container_format can be only replaced for a queued image." msgstr "큐에 있는 이미지에 대해 속성 container_format을 대체할 수 있습니다." msgid "Attribute disk_format can be only replaced for a queued image." msgstr "큐에 있는 이미지에 대해 속성 disk_format을 대체할 수 있습니다." msgid "" "Auth key for the user authenticating against the Swift authentication " "service. 
(deprecated)" msgstr "" "Swift 인증 서비스에 대한 사용자 인증을 위한인증 키입니다. (더 이상 사용되지 " "않음)" #, python-format msgid "Auth service at URL %(url)s not found." msgstr "URL %(url)s의 Auth 서비스를 찾을 수 없습니다." #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" "인증 오류 - 파일 업로드 중에 토큰이 만료되었습니다. %s의 이미지 데이터를 삭제" "합니다." msgid "Authorization failed." msgstr "권한 부여에 실패했습니다." msgid "Available categories:" msgstr "사용 가능한 카테고리:" #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" "잘못된 \"%s\" 쿼리 필터 형식입니다. ISO 8601 DateTime 표기법을 사용하십시오." #, python-format msgid "Bad Command: %s" msgstr "잘못된 명령: %s" #, python-format msgid "Bad header: %(header_name)s" msgstr "잘못된 헤더: %(header_name)s" #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "잘못된 값이 %(filter)s 필터에 전달됨, %(val)s 제공" #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "양식이 잘못된 S3 URI: %(uri)s" #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "Swift URI에 양식이 잘못된 신임 정보 '%(creds)s'" msgid "Badly formed credentials in Swift URI." msgstr "Swift URI에 양식이 잘못된 신임 정보가 있습니다." msgid "Base directory that the image cache uses." msgstr "이미지 캐시가 사용하는 기본 디렉토리입니다." msgid "BinaryObject property cannot be declared mutable" msgstr "BinaryObject 특성은 변경 가능으로 선언할 수 없음" #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "Blob %(name)s에 값이 여러 개일 수 없음" msgid "Blob size is not set" msgstr "Blob 크기를 설정하지 않음" msgid "Body expected in request." msgstr "요청에 본문이 있어야 합니다." msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "파일 및 legacy_image_id를 동시에 지정할 수 없음" msgid "CA certificate file to use to verify connecting clients." msgstr "클라이언트 연결을 확인하기 위해 사용하는 CA 인증 파일" msgid "Cannot be a negative value" msgstr "음수 값일 수 없음" msgid "Cannot be a negative value." msgstr "음수 값이 될 수 없습니다." 
#, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." msgstr "이미지 %(key)s '%(value)s'을(를) 정수로 변환할 수 없습니다." msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "예약된 이름 'metadata'를 사용하는 아티팩트 특성을 선언할 수 없음" #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "아티팩트 '%(name)s'을(를) 로드할 수 없음" msgid "Cannot remove last location in the image." msgstr "이미지에서 마지막 위치를 제거할 수 없습니다." #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "이미지 %(image_id)s 에 대한 데이터 저장 불가: %(error)s" msgid "Cannot set locations to empty list." msgstr "위치를 비어 있는 목록으로 설정할 수 없습니다." msgid "Cannot specify 'max_size' explicitly" msgstr "'max_size'를 명시적으로 지정할 수 없음" msgid "Cannot specify 'min_size' explicitly" msgstr "'min_size'를 명시적으로 지정할 수 없음" msgid "Cannot upload to an unqueued image" msgstr "큐에 들어가지 않은 이미지에 업로드할 수 없음" #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "연산자 %(op)s(으)로 이 매개변수를 사용할 수 없음" msgid "Certificate file to use when starting API server securely." msgstr "API 서버를 안전하게 시작할 때 사용하는 인증 파일" #, python-format msgid "Certificate format not supported: %s" msgstr "인증서 형식이 지원되지 않음: %s" #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "%s UTC 후에 인증서가 올바르지 않음" #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "%s UTC 전에 인증서가 올바르지 않음" #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." msgstr "체크섬 검증에 실패했습니다. '%s' 이미지 캐시가 중단되었습니다." msgid "Client disconnected before sending all data to backend" msgstr "모든 데이터를 백엔드로 전송하기 전에 클라이언트 연결이 끊어짐" msgid "Command not found" msgstr "명령을 찾을 수 없음" msgid "Configuration option was not valid" msgstr "구성 옵션이 올바르지 않음" #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "연결 오류/URL %(url)s에서 Auth 서비스에 대한 잘못된 요청입니다." #, python-format msgid "Constructed URL: %s" msgstr "URL을 구성함: %s" msgid "Container format is not specified." 
msgstr "컨테이너 형식이 지정되지 않았습니다." msgid "Content-Type must be application/octet-stream" msgstr "Content-Type은 application/octet-stream이어야 함" #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "%(image_id)s 이미지에 대한 손상된 이미지 다운로드" #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "30초 동안 시도한 후 %(host)s:%(port)s에 바인드할 수 없음" msgid "Could not find OVF file in OVA archive file." msgstr "OVA 아카이브 파일에서 OVF를 찾을 수 없습니다." #, python-format msgid "Could not find metadata object %s" msgstr "메타데이터 오브젝트 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find metadata tag %s" msgstr "메타데이터 태그 %s을(를) 찾을 수 없음" #, python-format msgid "Could not find namespace %s" msgstr "%s 네임스페이스를 찾을 수 없음" #, python-format msgid "Could not find property %s" msgstr "특성 %s을(를) 찾을 수 없음" msgid "Could not find required configuration option" msgstr "필요한 구성 옵션을 찾을 수 없음" #, python-format msgid "Could not find task %s" msgstr "태스크 %s을(를) 찾을 수 없음" #, python-format msgid "Could not update image: %s" msgstr "이미지를 업데이트할 수 없음: %s" msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "여러 디스크를 포함하는 OVA 패키지는 현재 지원되지 않습니다." msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" "사용자 정의 유효성 검증기 목록에 튜플 '(function, message)'을 포함해야 함" #, python-format msgid "Data for image_id not found: %s" msgstr "image_id에 대한 데이터를 찾을 수 없음: %s" msgid "Data supplied was not valid." msgstr "제공된 데이터가 올바르지 않습니다." 
msgid "Date and time of image member creation" msgstr "이미지 멤버 작성 날짜 및 시간" msgid "Date and time of image registration" msgstr "이미지 등록 날짜 및 시간" msgid "Date and time of last modification of image member" msgstr "이미지 멤버의 최종 수정 날짜 및 시간" msgid "Date and time of namespace creation" msgstr "네임스페이스 작성 날짜 및 시간" msgid "Date and time of object creation" msgstr "오브젝트 작성 날짜 및 시간" msgid "Date and time of resource type association" msgstr "자원 유형 연관 날짜 및 시간" msgid "Date and time of tag creation" msgstr "태그 작성 날짜 및 시간" msgid "Date and time of the last image modification" msgstr "최종 이미지 수정의 날짜 및 시간" msgid "Date and time of the last namespace modification" msgstr "최종 네임스페이스 수정의 날짜 및 시간" msgid "Date and time of the last object modification" msgstr "최종 오브젝트 수정의 날짜 및 시간" msgid "Date and time of the last resource type association modification" msgstr "최종 자원 유형 연관 수정의 날짜 및 시간" msgid "Date and time of the last tag modification" msgstr "최종 태그 수정 날짜 및 시간" msgid "Datetime when this resource was created" msgstr "이 자원이 작성된 Datetime" msgid "Datetime when this resource was updated" msgstr "이 자원이 업데이트된 Datetime" msgid "Datetime when this resource would be subject to removal" msgstr "이 자원이 제거되는 Datetime" msgid "" "Default value for the number of items returned by a request if not specified " "explicitly in the request" msgstr "" "요청에 명시적으로 지정하지 않은 경우 요청에 의해 리턴되는 항목 수의 기본값" msgid "Default value is invalid" msgstr "기본값이 올바르지 않음" #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "할당량을 초과하기 때문에 아티팩트 업로드를 거부하는 중: %s" #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "할당량을 초과하기 때문에 이미지 업로드를 거부하는 중: %s" #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "%d바이트를 초과하는 이미지의 업로드를 거부하는 중입니다." 
#, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "종속성 특성 '%s'을(를) 먼저 삭제해야 함" msgid "Dependency relations cannot be mutable" msgstr "종속성 관계는 변경할 수 없음" msgid "Deploy the v1 OpenStack Images API." msgstr "v1 OpenStack 이미지 API를 배치하십시오." msgid "Deploy the v1 OpenStack Registry API." msgstr "v1 OpenStack 레지스트리 API를 배치하십시오." msgid "Deploy the v2 OpenStack Images API." msgstr "v2 OpenStack 이미지 API를 배치하십시오." msgid "Deploy the v2 OpenStack Registry API." msgstr "v2 OpenStack 레지스트리 API를 배치하십시오." msgid "Descriptive name for the image" msgstr "이미지에 대한 설명식 이름" msgid "Dictionary contains unexpected key(s)" msgstr "사전에 예상치 않은 키가 있음" msgid "Dictionary size is greater than maximum" msgstr "사전 크기가 최대값보다 큼" msgid "Dictionary size is less than minimum" msgstr "사전 크기가 최소값보다 작음" msgid "" "Digest algorithm which will be used for digital signature. Use the command " "\"openssl list-message-digest-algorithms\" to get the available algorithms " "supported by the version of OpenSSL on the platform. Examples are \"sha1\", " "\"sha256\", \"sha512\", etc." msgstr "" "디지털 서명에 사용할 다이제스트 알고리즘입니다. \"openssl list-message-" "digest-algorithms\" 명령을 사용하여 플랫폼에서 OpenSSL 버전이 지원하는 사용 " "가능 알고리즘을 가져오십시오. 예를 들어, \"sha1\", \"sha256\", \"sha512\" 등" "입니다." msgid "Disk format is not specified." msgstr "디스크 형식이 지정되지 않았습니다." msgid "Does not match pattern" msgstr "패턴과 일치하지 않음" #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: %(reason)s" msgstr "" "%(driver_name)s 드라이버가 올바르게 구성되지 않았습니다. 이유: %(reason)s" msgid "Either a file or a legacy_image_id has to be specified" msgstr "파일을 지정하거나 legacy_image_id를 지정해야 함" msgid "" "Error decoding your request. Either the URL or the request body contained " "characters that could not be decoded by Glance" msgstr "" "요청을 디코딩하는 중에 오류가 발생했습니다. URL이나 요청 본문에 Glance에서 디" "코딩할 수 없는 문자가 포함되어 있습니다." 
#, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "이미지 %(image_id)s의 멤버를 페치하는 중에 오류 발생: %(inner_msg)s" msgid "Error in store configuration. Adding artifacts to store is disabled." msgstr "저장소 구성에 오류가 있습니다. 저장소에 아티팩트를 추가할 수 없습니다." msgid "Error in store configuration. Adding images to store is disabled." msgstr "저장소 구성에 오류가 있습니다. 이미지를 저장소에 추가할 수 없습니다." msgid "Error occurred while creating the verifier" msgstr "확인자를 작성하는 중에 오류 발생" msgid "Error occurred while verifying the signature" msgstr "서명을 확인하는 중에 오류 발생" msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "{\"member\": \"image_id\"} 형식의 멤버가 있어야 함" msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "{\"status\": \"status\"} 형식의 상태가 있어야 함" msgid "External source should not be empty" msgstr "외부 소스는 비어있지 않아야 함" #, python-format msgid "External sources are not supported: '%s'" msgstr "외부 소스가 지원되지 않음: '%s'" #, python-format msgid "Failed to activate image. Got error: %s" msgstr "이미지 활성화에 실패했습니다. 오류 발생: %s" #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "이미지 메타데이터 추가 실패. 오류 발생: %s" #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "삭제할 아티팩트 %(artifact_id)s을(를) 찾는 데 실패함" #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "삭제할 %(image_id)s 이미지를 찾는 데 실패함" #, python-format msgid "Failed to find image to delete: %s" msgstr "삭제할 image 가 발견되지 않음 : %s" #, python-format msgid "Failed to find image to update: %s" msgstr "업데이트할 이미지를 찾는 데 실패함: %s" #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "삭제하기 위한 리소스 타입 %(resourcetype)s 검색 실패" #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "이미지 캐시 데이터베이스를 초기화하지 못했습니다. 오류 발생: %s" #, python-format msgid "Failed to read %s from config" msgstr "구성에서 %s을(를) 읽지 못했음" #, python-format msgid "Failed to reserve image. 
Got error: %s" msgstr "이미지 예약 실패, 오류 발생: %s" #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "이미지 메타데이터 업데이트 실패. 오류 발생: %s" #, python-format msgid "Failed to upload image %s" msgstr "이미지 %s을(를) 업로드하지 못했습니다." #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" "HTTP 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error: " "%(error)s" msgstr "" "내부 오류로 인해 이미지 %(image_id)s의 이미지 데이터 업로드 실패: %(error)s" #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" "파일 %(path)s에 올바르지 않은 백업 파일 %(bfile)s이(가) 있어 중단합니다." msgid "" "File based imports are not allowed. Please use a non-local source of image " "data." msgstr "" "파일 기반 가져오기는 허용되지 않습니다. 이미지 데이터의 로컬이 아닌 소스를 사" "용하십시오." msgid "File too large" msgstr "파일이 너무 큼" msgid "File too small" msgstr "파일이 너무 작음" msgid "Forbidden image access" msgstr "금지된 이미지 액세스" #, python-format msgid "Forbidden to delete a %s image." msgstr "%s 이미지를 삭제하는 것은 금지되어 있습니다." #, python-format msgid "Forbidden to delete image: %s" msgstr "이미지 삭제가 금지됨: %s" #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "%(status)s 이미지의 '%(key)s' 수정이 금지되었습니다." #, python-format msgid "Forbidden to modify '%s' of image." msgstr "이미지의 '%s'을(를) 수정하는 것이 금지되어 있습니다." msgid "Forbidden to reserve image." msgstr "이미지 예약은 금지되어 있습니다." msgid "Forbidden to update deleted image." msgstr "삭제된 이미지를 업데이트하는 것은 금지되어 있습니다." #, python-format msgid "Forbidden to update image: %s" msgstr "이미지 업데이트가 금지됨: %s" #, python-format msgid "Forbidden upload attempt: %s" msgstr "금지된 업로드 시도: %s" #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "요청이 금지되고 메타데이터 정의 namespace=%s이(가) 표시되지 않습니다." #, python-format msgid "Forbidding request, task %s is not visible" msgstr "요청 금지. 
태스크 %s이(가) 표시되지 않음" msgid "Format of the container" msgstr "컨테이너의 형식" msgid "Format of the disk" msgstr "디스크의 형식" #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "blob %(name)s 데이터 가져오기 실패: %(err)s." #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "이미지 %(id)s 데이터를 불러올 수 없음: %(err)s" msgid "Glance client not installed" msgstr "Glance 클라이언트가 설치되지 않음" #, python-format msgid "Host \"%s\" is not valid." msgstr "\"%s\" 호스트가 올바르지 않습니다." #, python-format msgid "Host and port \"%s\" is not valid." msgstr "호스트 및 포트 \"%s\"이(가) 올바르지 않습니다." msgid "" "Human-readable informative message only included when appropriate (usually " "on failure)" msgstr "" "사용자가 읽을 수 있는 정보 메시지는 적절한 경우에만 포함됨 (일반적으로 실패 " "시)" msgid "If False doesn't trace SQL requests." msgstr "false인 경우 SQL 요청을 추적하지 않습니다." msgid "If False fully disable profiling feature." msgstr "false인 경우 프로파일링 기능을 전부 사용 안함으로 설정합니다." msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order to " "close the client socket connection explicitly after the response is sent and " "read successfully by the client, you simply have to set this option to False " "when you create a wsgi server." msgstr "" "False인 경우 서버는 헤더 \"연결: 닫음\"을 리턴합니다. True인 경우 서버는 응답" "에서 \"연결: 유휴\"를 리턴합니다. 응답을 보내고 클라이언트에서 이를 읽은 후" "에 클라이언트 소켓 연결을 명시적으로 닫으려면 wsgi 서버를 작성할 때 이 옵션" "을 False로 설정하면 됩니다." msgid "If true, image will not be deletable." msgstr "true일 경우 이미지는 삭제 불가능합니다." msgid "If true, namespace will not be deletable." msgstr "true일 경우 네임스페이스는 삭제 불가능합니다." #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "이미지 %(id)s이(가) 사용 중이므로 이를 삭제할 수 없음: %(exc)s" #, python-format msgid "Image %(id)s not found" msgstr "%(id)s 이미지를 찾을 수 없음" #, python-format msgid "" "Image %(image_id)s could not be found after upload. 
The image may have been " "deleted during the upload: %(error)s" msgstr "" "업로드한 이미지 %(image_id)s을(를) 찾을 수 없음. 이미지는 업로드 중에 삭제되" "었을 수 있음: %(error)s" #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "%(image_id)s 이미지는 보호되므로 삭제할 수 없습니다." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload, cleaning up the chunks uploaded." msgstr "" "업로드 후에 %s 이미지를 찾을 수 없습니다. 업로드 동안 이미지가 삭제되었을 수 " "있습니다. 업로드된 청크를 정리합니다." #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted " "during the upload." msgstr "" "업로드 후 이미지 %s을(를) 찾을 수 없습니다. 업로드 중에 이미지가 삭제되었을 수 " "있습니다." #, python-format msgid "Image %s is deactivated" msgstr "%s 이미지가 비활성화됨" #, python-format msgid "Image %s is not active" msgstr "%s 이미지가 활성 상태가 아님" #, python-format msgid "Image %s not found." msgstr "%s 이미지를 찾을 수 없음" #, python-format msgid "Image exceeds the storage quota: %s" msgstr "이미지가 스토리지 할당량을 초과함: %s" msgid "Image id is required." msgstr "이미지 ID가 필요합니다." msgid "Image is protected" msgstr "이미지가 보호됨" #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "이미지 %(id)s에 대한 이미지 멤버 한계 초과: %(e)s:" #, python-format msgid "Image name too long: %d" msgstr "이미지 이름이 너무 김: %d" msgid "Image operation conflicts" msgstr "이미지 조작이 충돌함" #, python-format msgid "" "Image status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" "%(cur_status)s에서 %(new_status)s(으)로의 이미지 상태 전이가 허용되지 않음" #, python-format msgid "Image storage media is full: %s" msgstr "이미지 스토리지 미디어 공간이 꽉 참: %s" #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "이미지 %(id)s에 대한 이미지 태그 한계 초과: %(e)s:" #, python-format msgid "Image upload problem: %s" msgstr "이미지 업로드 문제: %s" #, python-format msgid "Image with identifier %s already exists!" msgstr "ID가 %s인 이미지가 이미 존재합니다!" #, python-format msgid "Image with identifier %s has been deleted." 
msgstr "ID가 %s인 이미지가 삭제되었습니다." #, python-format msgid "Image with identifier %s not found" msgstr "ID가 %s인 이미지를 찾을 수 없음" #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "지정된 ID %(image_id)s을(를) 가진 이미지를 찾을 수 없음" #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" "인증 전략이 올바르지 않음. 예상: \"%(expected)s\", 수신: \"%(received)s\"" #, python-format msgid "Incorrect request: %s" msgstr "올바르지 않은 요청: %s" msgid "Index is out of range" msgstr "색인이 범위를 벗어남" msgid "Index is required" msgstr "색인은 필수임" #, python-format msgid "Input does not contain '%(key)s' field" msgstr "입력에 '%(key)s' 필드가 포함되어 있지 않음" #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "아티팩트 스토리지 매체에 대한 권한이 충분하지 않음: %s" #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "이미지 스토리지 미디어 권한 부족 : %s" #, python-format msgid "Invalid Content-Type for work with %s" msgstr "%s과(와) 작업하는 데 올바르지 않은 Content-Type" #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "이 자원에 대해 올바르지 않은 JSON 포인터: '/%s'" #, python-format msgid "Invalid certificate format: %s" msgstr "올바르지 않은 인증서 형식: %s" #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "올바르지 않은 체크섬 '%s': 32자를 초과할 수 없음" msgid "Invalid configuration in glance-swift conf file." msgstr "glance-swift 구성 파일의 구성이 올바르지 않습니다." msgid "Invalid configuration in property protection file." msgstr "특성 보호 파일의 올바르지 않은 구성입니다." #, python-format msgid "Invalid container format '%s' for image." msgstr "이미지에 대한 컨테이너 형식 '%s'이(가) 올바르지 않습니다." #, python-format msgid "Invalid content type %(content_type)s" msgstr "올바르지 않은 컨텐츠 유형 %(content_type)s" msgid "Invalid dict property type" msgstr "올바르지 않은 사전 특성 유형" msgid "Invalid dict property type specification" msgstr "올바르지 않은 사전 특성 유형 스펙" #, python-format msgid "Invalid disk format '%s' for image." 
msgstr "이미지에 대한 디스크 형식 '%s'이(가) 올바르지 않습니다." #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "올바르지 않은 필터 값 %s입니다. 따옴표를 닫지 않았습니다." #, python-format msgid "" "Invalid filter value %s. There is no comma after closing quotation mark." msgstr "올바르지 않은 필터 값 %s입니다. 닫기 따옴표 전에 쉼표가 없습니다." #, python-format msgid "" "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "올바르지 않은 필터 값 %s입니다. 열기 따옴표 전에 쉼표가 없습니다." #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "올바르지 않은 헤더 \"Content-Type\": %s" msgid "Invalid image id format" msgstr "올바르지 않은 이미지 ID 형식" msgid "Invalid item type specification" msgstr "올바르지 않은 항목 유형 스펙" #, python-format msgid "Invalid json body: %s" msgstr "올바르지 않은 json 본문: %s" msgid "Invalid jsonpatch request" msgstr "올바르지 않은 jsonpatch 요청" msgid "Invalid location" msgstr "잘못된 위치" #, python-format msgid "Invalid location %s" msgstr "올바르지 않은 위치 %s" #, python-format msgid "Invalid location: %s" msgstr "올바르지 않은 위치: %s" #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" "올바르지 않은 location_strategy 옵션: %(name)s. 올바른 전략 옵션 : " "%(strategies)s" msgid "Invalid locations" msgstr "잘못된 위치들" #, python-format msgid "Invalid locations: %s" msgstr "올바르지 않은 위치: %s" msgid "Invalid marker format" msgstr "올바르지 않은 마커 형식" msgid "Invalid marker. Image could not be found." msgstr "올바르지 않은 마커입니다. 이미지를 찾을 수 없습니다." #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "올바르지 않은 mask_gen_algorithm: %s" #, python-format msgid "Invalid membership association: %s" msgstr "올바르지 않은 멤버십 연관: %s" msgid "" "Invalid mix of disk and container formats. When setting a disk or container " "format to one of 'aki', 'ari', or 'ami', the container and disk formats must " "match." msgstr "" "디스크와 컨테이너 형식의 조합이 올바르지 않습니다. 디스크나 컨테이너 형식을 " "'aki', 'ari', 또는 'ami' 중 하나로 설정할 경우 컨테이너와 디스크형식이 일치해" "야 합니다." 
#, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: %(available)s." msgstr "올바르지 않은 조작: `%(op)s`. 다음 중 하나여야 합니다. %(available)s." msgid "Invalid position for adding a location." msgstr "위치를 추가하기에 올바르지 않은 포지션입니다." msgid "Invalid position for removing a location." msgstr "위치를 제거하기에 올바르지 않은 포지션입니다." msgid "Invalid property definition" msgstr "올바르지 않은 특성 정의" #, python-format msgid "Invalid pss_salt_length: %s" msgstr "올바르지 않은 pss_salt_length: %s" #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "서명 키 유형에 올바르지 않은 공용 키: %s" msgid "Invalid reference list specification" msgstr "올바르지 않은 참조 목록 스펙" msgid "Invalid referenced type" msgstr "올바르지 않은 참조 유형" msgid "Invalid request PATCH for work with blob" msgstr "blob와 작업하는 데 올바르지 않은 요청 PATCH" msgid "Invalid service catalog json." msgstr "올바르지 않은 서비스 카탈로그 json입니다." #, python-format msgid "Invalid signature hash method: %s" msgstr "올바르지 않은 서명 해시 메소드: %s" #, python-format msgid "Invalid signature key type: %s" msgstr "올바르지 않은 서명 키 유형: %s" #, python-format msgid "Invalid sort direction: %s" msgstr "올바르지 않은 정렬 방향: %s" #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one of " "the following: %(available)s." msgstr "" "올바르지 않는 정렬 키: %(sort_key)s. 유형 버전이 설정되지 않은 경우에는 다음 " "중 하나여야 합니다. %(available)s." #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" "올바르지 않은 정렬 키: %(sort_key)s. 다음 중 하나여야 합니다. %(available)s." #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "올바르지 않은 정렬 키: %(sort_key)s. 이 특성을 기준으로 정렬할 수 없음" #, python-format msgid "Invalid status value: %s" msgstr "올바르지 않은 상태 값: %s" #, python-format msgid "Invalid status: %s" msgstr "올바르지 않은 상태: %s" #, python-format msgid "Invalid time format for %s." msgstr "%s에 올바르지 않은 시간 형식입니다." 
msgid "Invalid type definition" msgstr "올바르지 않은 유형 정의" #, python-format msgid "Invalid type value: %s" msgstr "올바르지 않은 유형 값: %s" #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition namespace " "with the same name of %s" msgstr "" "올바르지 않은 업데이트입니다. %s과(와) 동일한 이름의 메타데이터 정의 네임스페" "이스가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" "과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. namespace=%(namespace_name)s에서 name=%(name)s" "과(와) 동일한 이름의 메타데이터 정의 오브젝트가 중복됩니다." #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition property " "with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" "올바르지 않은 업데이트입니다. 네임스페이스=%(namespace_name)s의 동일한 이름=" "%(name)s(으)로 메타데이터 정의 특성이 중복됩니다." #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "매개변수 '%(param)s'의 올바르지 않은 값 '%(value)s': %(extra_msg)s" #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "옵션 %(option)s에 올바르지 않은 값: %(value)s" #, python-format msgid "Invalid visibility value: %s" msgstr "올바르지 않은 가시성 값: %s" msgid "Is not allowed value" msgstr "허용되는 값이 아님" #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using " "ipv6 since eventlet.greendns currently breaks with ipv6 addresses. Please " "ensure that eventlet is not imported prior to this being set." msgstr "" "다음과 같이 설정하기 전에 eventlet 모듈을 가져왔습니다. %s='yes'. eventlet." "greendns가 현재 ipv6 주소로 나뉘므로 ipv6를 사용 중인 경우 지금 eventlet." 
"greendns를 사용 안함으로 설정해야 합니다. 이와 같이 설정하기 전에 eventlet을 " "가져오지 않아야 합니다." msgid "It's invalid to provide multiple image sources." msgstr "여러 개의 이미지 소스를 제공하면 안 됩니다." msgid "It's not allowed to add locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 추가할 수 없습니다." msgid "It's not allowed to remove locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 제거할 수 없습니다." msgid "It's not allowed to update locations if locations are invisible." msgstr "위치가 표시되지 않는 경우 위치를 업데이트할 수 없습니다." msgid "Items have to be unique" msgstr "항목이 고유해야 함" msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' are " "allowed." msgstr "" "Json 경로는 '/'로 시작해야 하며 '/'로 끝나지 않아야 합니다. 두 개의 후속 " "'/'는 허용되지 않습니다." msgid "Legacy image was not found" msgstr "레거시 이미지를 찾을 수 없음" msgid "Length is greater than maximum" msgstr "길이가 최대값보다 큼" msgid "Length is less than minimum" msgstr "길이가 최소값보다 작음" msgid "Limit param must be an integer" msgstr "Limit 매개변수는 정수여야 함" msgid "Limit param must be positive" msgstr "Limit 매개변수는 양수여야 함" #, python-format msgid "Limit param must not be higher than %d" msgstr "Limit 매개변수는 %d보다 크지 않아야 함" msgid "Limits request ID length." msgstr "요청 ID 길이를 제한합니다." msgid "List definitions may hot have defaults" msgstr "목록 정의에 기본값을 사용할 수 없음" msgid "List of strings related to the image" msgstr "이미지와 관련된 문자열 목록" msgid "List size is greater than maximum" msgstr "목록 크기가 최대값보다 큼" msgid "List size is less than minimum" msgstr "목록 크기가 최소값보다 작음" msgid "Loop time between checking for new items to schedule for delete." msgstr "삭제를 위해 스케줄에 대한 새 항목을 확인하는 사이의 루프 시간입니다." #, python-format msgid "Malformed Content-Range header: %s" msgstr "잘못된 형식의 컨텐츠 범위 헤더: %s" msgid "Malformed JSON in request body." msgstr "요청 본문에서 JSON의 형식이 올바르지 않습니다." msgid "Max string length may not exceed 255 characters" msgstr "최대 문자열 길이는 255자를 초과할 수 없음" msgid "Maximal age is count of days since epoch." msgstr "최대 기간은 epoch 이후의 일 수입니다." 
msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated by " "the Keystone v3 API with big service catalogs" msgstr "" "허용할 메시지 헤더의 최대 행 크기입니다. 더 큰 토큰 사용 시 max_header_line" "을 늘려야 할 수 있습니다(일반적으로 큰 서비스 카탈로그가 있는 키스톤 v3 API에" "서 생성됨)." msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "이미지당 이미지 멤버의 최대 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Maximum number of locations allowed on an image. Negative values evaluate to " "unlimited." msgstr "이미지에 허용된 최대 위치 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Maximum number of properties allowed on an image. Negative values evaluate " "to unlimited." msgstr "이미지에서 허용된 최대 특성 수입니다. 음수 값은 무제한을 의미합니다." msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." msgstr "이미지에 허용된 최대 태그 수입니다. 음수 값은 무제한을 의미합니다." msgid "Maximum permissible number of items that could be returned by a request" msgstr "요청에서 리턴할 수 있는 허용 가능한 최대 항목 수" #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "최대 경로 재지정(%(redirects)s)에 도달했습니다." msgid "" "Maximum size of image a user can upload in bytes. Defaults to 1099511627776 " "bytes (1 TB).WARNING: this value should only be increased after careful " "consideration and must be set to a value under 8 EB (9223372036854775808)." msgstr "" "사용자가 업로드할 수 있는 이미지의 최대 크기(바이트).1099511627776바이트" "(1TB). 경고: 이 값은 신중하게 고려하고 늘려야 하며 8EB(9223372036854775808) " "이하의 값으로 설정해야 합니다." #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "멤버 %(member_id)s이(가) 이미지 %(image_id)s에 대해 중복됨" msgid "Member can't be empty" msgstr "멤버는 비어 있을 수 없음" msgid "Member to be added not specified" msgstr "추가할 멤버를 지정하지 않음" msgid "Membership could not be found." msgstr "멤버십을 찾을 수 없습니다." #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." 
msgstr "" "메타데이터 정의 네임스페이스 %(namespace)s이(가) 보호되므로 삭제할 수 없습" "니다." #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 네임스페이스를 찾을 수 없음" #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "메타데이터 정의 namespace=%(namespace_name)s을(를) 찾을 수 없습니다." #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" "메타데이터 정의 오브젝트 %(object_name)s이(가) 보호되므로 삭제할 수 없습니" "다." #, python-format msgid "Metadata definition object not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 오브젝트를 찾을 수 없음" #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be " "deleted." msgstr "" "메타데이터 정의 특성 %(property_name)s이(가) 보호되므로 삭제할 수 없습니다." #, python-format msgid "Metadata definition property not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 특성을 찾을 수 없음" #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-system " "type and cannot be deleted." msgstr "" "메타데이터 정의 resource-type %(resource_type_name)s은(는) 시드(seed) 시스템 " "유형이고 삭제할 수 없습니다." #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is protected " "and cannot be deleted." msgstr "" "메타데이터 정의 resource-type-association %(resource_type)s이(가) 보호되고 삭" "제할 수 없습니다." #, python-format msgid "" "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "메타데이터 정의 태그 %(tag_name)s은(는) 보호되므로 삭제할 수 없습니다." #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "id=%s에 대한 메타데이터 정의 태그를 찾을 수 없음" msgid "Min string length may not be negative" msgstr "최소 문자열 길이는 음수일 수 없음" msgid "Minimal rows limit is 1." msgstr "최소 행 제한은 1입니다." #, python-format msgid "Missing required credential: %(required)s" msgstr "필수 신임 정보 누락: %(required)s" #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. 
This generally means " "that a region is required and you have not supplied one." msgstr "" "다중 '이미지' 서비스가 %(region)s 리젼에 일치합니다. 이는 일반적으로 리젼이 " "필요하지만 아직 리젼을 제공하지 않은 경우 발생합니다." msgid "Must supply a positive, non-zero value for age." msgstr "기간에 0이 아닌 양수 값을 제공해야 합니다." msgid "Name of the paste configuration file." msgstr "붙여넣기 구성 파일의 이름입니다." #, python-format msgid "No artifact found with ID %s" msgstr "ID가 %s인 아티팩트를 찾을 수 없음" msgid "No authenticated user" msgstr "인증된 사용자가 없음" #, python-format msgid "No image found with ID %s" msgstr "ID가 %s인 이미지를 찾을 수 없음" #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "%(img)s 이미지에서 ID가 %(loc)s인 위치를 찾을 수 없음" msgid "No permission to share that image" msgstr "해당 이미지를 공유한 권한이 없음" #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "'%(name)s'용 플러그인을 로드하지 않음" msgid "No property to access" msgstr "액세스할 특성이 없음" #, python-format msgid "No such key '%s' in a dict" msgstr "사전에 해당 키 '%s'이(가) 없음" #, python-format msgid "Not a blob property '%s'" msgstr "blob 특성 '%s'이(가) 아님" msgid "Not a downloadable entity" msgstr "다운로드 가능한 엔티티가 아님" msgid "Not a list property" msgstr "목록 특성이 아님" #, python-format msgid "Not a list property '%s'" msgstr "목록 특성 '%s'이(가) 아님" msgid "Not a valid value type" msgstr "올바른 값 유형이 아님" #, python-format msgid "Not all dependencies are in '%s' state" msgstr "일부 종속성의 세부 상태가 '%s'임" #, python-format msgid "Not allowed to create members for image %s." msgstr "이미지 %s의 멤버를 작성할 수 없습니다." #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "'%s' 상태의 이미지를 비활성화할 수 없음" #, python-format msgid "Not allowed to delete members for image %s." msgstr "이미지 %s의 멤버를 삭제할 수 없습니다." #, python-format msgid "Not allowed to delete tags for image %s." msgstr "이미지 %s의 태그를 삭제할 수 없습니다." #, python-format msgid "Not allowed to list members for image %s." msgstr "이미지 %s의 멤버를 나열할 수 없습니다." 
#, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "'%s' 상태의 이미지를 다시 활성화할 수 없음" #, python-format msgid "Not allowed to update members for image %s." msgstr "이미지 %s의 멤버를 업데이트할 수 없습니다." #, python-format msgid "Not allowed to update tags for image %s." msgstr "이미지 %s의 태그를 업데이트할 수 없습니다." #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" "이미지 %(image_id)s에 대한 이미지 데이터의 업로드가 허용되지 않음: %(error)s" #, python-format msgid "Not an array idx '%s'" msgstr "배열 idx '%s'이(가) 아님" msgid "Number of sort dirs does not match the number of sort keys" msgstr "정렬 디렉토리 수가 정렬 키 수와 일치하지 않음" msgid "OVA extract is limited to admin" msgstr "관리자만 OVA를 추출할 수 있음" msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config file. " "Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" "관심있는 OVF 메타데이터가 ovf-metadata.json 구성 파일에 지정되지 않았습니다. " "\"cim_pasd\"를 원하는 CIM_ProcessorAllocationSettingData 특성 목록으로 설정하" "십시오." msgid "OVF properties config file \"ovf-metadata.json\" was not found." msgstr "OVF 특성 구성 파일 \"ovf-metadata.json\"을 찾을 수 없습니다." msgid "Old and new sorting syntax cannot be combined" msgstr "이전 및 새 정렬 구문은 결합할 수 없음" msgid "Only list indexes are allowed for blob lists" msgstr "blob 목록에는 목록 색인만 허용됨" #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "\"%s\" 오퍼레이션에는 \"value\"라는 이름의 멤버가 필요합니다." msgid "" "Operation objects must contain exactly one member named \"add\", \"remove\", " "or \"replace\"." msgstr "" "조작 오브젝트에는 \"add\", \"remove\", 또는 \"replace\" 멤버 중 하나만 포함되" "어야 합니다." msgid "" "Operation objects must contain only one member named \"add\", \"remove\", or " "\"replace\"." msgstr "" "조작 오브젝트에는 \"add\", \"remove\",또는 \"replace\" 멤버 중 하나만 포함되" "어야 합니다." msgid "Operations must be JSON objects." msgstr "오퍼레이션은 JSON 오브젝트여야 합니다." 
#, python-format msgid "Operator %(op)s is not supported" msgstr "연산자 %(op)s은(는) 지원되지 않음" #, python-format msgid "Original locations is not empty: %s" msgstr "원본 위치가 비어있지 않음: %s" msgid "Owner can't be updated by non admin." msgstr "비관리자는 소유자를 업데이트할 수 없습니다." msgid "Owner must be specified to create a tag." msgstr "태그를 작성하려면 소유자로 지정되어야 합니다." msgid "Owner of the image" msgstr "이미지의 소유자" msgid "Owner of the namespace." msgstr "네임스페이스의 소유자입니다." msgid "Param values can't contain 4 byte unicode." msgstr "매개변수 값에 4바이트 유니코드를 포함할 수 없습니다." msgid "" "Partial name of a pipeline in your paste configuration file with the service " "name removed. For example, if your paste section name is [pipeline:glance-" "api-keystone] use the value \"keystone\"" msgstr "" "서비스 이름이 제거된 붙여넣기 구성 파일에 있는 파이프라인의 부분 이름입니다. " "예를 들어 붙여넣기 섹션 이름이 [pipeline:glance-api-keystone]이면 값으로 " "\"keystone\"을 사용합니다." msgid "Path to the directory where json metadata files are stored" msgstr "Json 메타데이터 파일이 저장된 디렉토리 경로" #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" "플러그인 이름 '%(plugin)s'은(는) 아티팩트 유형 이름 '%(name)s'과(와) 일치해" "야 함" #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" "`%s` 포인터에 인식되는 이스케이프 시퀀스가 아닌 \"~\"가 포함되어 있습니다." #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "포인터 `%s`에 인접 \"/\"가 포함됩니다." #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "포인터 `%s`에 올바른 토큰이 포함되어 있지 않습니다." #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "`%s` 포인터가 \"/\"로 시작하지 않습니다." #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "포인터 `%s`이(가) \"/\"로 끝납니다." msgid "" "Pointer contains '~' which is not part of a recognized escape sequence [~0, " "~1]." msgstr "" "포인터에 인식되는 이스케이프 시퀀스 [~0, ~1]의 일부가 아닌 '~'가 포함되어 있" "습니다." #, python-format msgid "Port \"%s\" is not valid." msgstr "\"%s\" 포트가 올바르지 않습니다." msgid "Port the registry server is listening on." 
msgstr "레지스트리 서버가 청취 중인 포트입니다." #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "사전 릴리스 숫자 컴포넌트가 너무 큼(최대 %d자)" msgid "Private key file to use when starting API server securely." msgstr "API 서버를 안전하게 시작할 때 사용할 개인 키 파일" #, python-format msgid "Process %d not running" msgstr "프로세스 %d이(가) 실행 중이지 않음" #, python-format msgid "Properties %s must be set prior to saving data." msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type " "association prefix of '%(prefix)s'." msgstr "" "특성 %(property_name)s이(가) 예상 자원 유형 연관 접두부인 '%(prefix)s'(으)로 " "시작하지 않습니다." #, python-format msgid "Property %s already present." msgstr "%s 특성이 이미 존재합니다." #, python-format msgid "Property %s does not exist." msgstr "%s 특성이 존재하지 않습니다." #, python-format msgid "Property %s may not be removed." msgstr "%s 특성을 제거할 수 없습니다." #, python-format msgid "Property %s must be set prior to saving data." msgstr "데이터를 저장하기 전에 %s 특성을 설정해야 합니다." #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "특성 '%(name)s'에 값 '%(val)s'이(가) 없음: %(msg)s" #, python-format msgid "Property '%s' is protected" msgstr "'%s' 특성이 보호됨 " msgid "Property names can't contain 4 byte unicode." msgstr "특성 이름에 4바이트 유니코드를 포함할 수 없습니다." #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" "규칙 %(rule)s에 대한 조작 %(operation)s의 특성 보호를 찾을 수 없습니다. 이 조" "작 수행을 위해 허용된 역할이 없습니다." #, python-format msgid "Property's %(prop)s value has not been found" msgstr "특성의 %(prop)s 값을 찾지 못함" #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" "제공된 이미지 크기가 저장된 이미지 크기와 일치해야 합니다(제공된 크기: " "%(ps)d, 저장된 크기: %(ss)d)." 
#, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "제공된 오브젝트가 스키마 '%(schema)s'에 일치하지 않음: %(reason)s" #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "제공된 태스크의 상태가 지원되지 않음: %(status)s" #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "제공된 태스크 유형이 지원되지 않음: %(type)s" msgid "Provides a user friendly description of the namespace." msgstr "사용자에게 익숙한 네임스페이스 설명을 제공합니다." msgid "Public images do not have members." msgstr "공용 이미지에 멤버가 없습니다." msgid "" "Public url to use for versions endpoint. The default is None, which will use " "the request's host_url attribute to populate the URL base. If Glance is " "operating behind a proxy, you will want to change this to represent the " "proxy's URL." msgstr "" "버전 엔드포인트에 사용할 공용 url입니다. 기본값은 None이며 이를 사용하면 요청" "의 host_url 속성을 사용하여 URL의 기본을 채웁니다. Glance가 프록시 뒤에서 작" "동 중이면 프록시 URL을 나타내도록 이를 변경할 수 있습니다." msgid "Python module path of data access API" msgstr "데이터 액세스 API의 Python 모듈 경로" msgid "Received invalid HTTP redirect." msgstr "올바르지 않은 HTTP 경로 재지정을 수신했습니다." #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "권한 부여를 위해 %(uri)s(으)로 경로 재지정 중입니다." #, python-format msgid "Registry service can't use %s" msgstr "레지스트리 서비스에서 %s을(를) 사용할 수 없음" #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" "API 서버에서 레지스트리가 올바르게 구성되지 않았습니다. 이유: %(reason)s" #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "관계 %(name)s에 값이 여러 개일 수 없음" #, python-format msgid "Reload of %(serv)s not supported" msgstr "%(serv)s을(를) 다시 로드할 수 없음" #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 다시 로드 중" #, python-format msgid "Removing stale pid file %s" msgstr "시간이 경과된 pid 파일 %s을(를) 제거하는 중" msgid "Request body must be a JSON array of operation objects." 
msgstr "요청 본문은 오퍼레이션 오브젝트의 JSON 배열이어야 합니다." msgid "Request must be a list of commands" msgstr "요청은 쉼표로 구분한 목록이어야 합니다." msgid "" "Required image properties for signature verification do not exist. Cannot " "verify signature." msgstr "서명 검증에 필요한 이미지 특성이 없습니다. 서명을 확인할 수 없습니다." #, python-format msgid "Required store %s is invalid" msgstr "필수 저장소 %s이(가) 올바르지 않음" msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: http://docs.openstack.org/developer/heat/template_guide/openstack." "html" msgstr "" "자원 유형 이름은 히트 자원 유형에 맞게 지정되어야 합니다.사용 가능: http://" "docs.openstack.org/developer/heat/template_guide/openstack.html" msgid "Response from Keystone does not contain a Glance endpoint." msgstr "Keystone의 응답에 Glance 엔드포인트가 들어있지 않습니다." msgid "Role used to identify an authenticated user as administrator." msgstr "인증된 사용자를 관리자로 식별하는 데 사용되는 역할." msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." msgstr "" "장기 실행 프로세스로 실행됩니다. 지정하지 않는 경우(기본값) 삭제 조작을 한 " "번 실행한 다음 종료합니다. 종료하지 않도록 지정된 경우 구성에 지정된 " "wakeup_time 간격으로 삭제를 실행하십시오." msgid "Scope of image accessibility" msgstr "이미지 접근성의 범위" msgid "Scope of namespace accessibility." msgstr "네임스페이스 접근성의 범위입니다." msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing " "messages." msgstr "" "Glance API 및 Glance 레지스트리 서비스 추적 메시지를 승인하기 위해 사용하는 " "비밀키" #, python-format msgid "Server %(serv)s is stopped" msgstr "서버 %(serv)s이(가) 중지됨" #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "서버 작업자 작성에 실패함: %(reason)s." msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. 
Accepted units are B, KB, MB, " "GB and TB representing Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is assumed. Note that there " "should not be any space between value and unit and units are case sensitive." msgstr "" "각 사용자에 대한 시스템 차원의 할당량을 설정하십시오. 이 값은 모든 스토리지 " "시스템에서 사용자가 사용할 수 있는 전체 용량입니다. 0 값은 무제한을 의미합니" "다. 값에 선택적 단위를 지정할 수 있습니다. 허용되는 단위는 각각 바이트, 킬로" "바이트, 메가바이트, 기가바이트 및 테라바이트를 나타내는 B, KB, MB, GB 및 TB입" "니다. 단위를 지정하지 않으면 바이트로 가정합니다. 값과 단위 사이에 공백을 사" "용하지 않아야 하며 단위는 대소문자를 구분합니다." #, python-format msgid "Show level %(shl)s is not supported in this operation" msgstr "이 조작에서는 표시 레벨 %(shl)s이(가) 지원되지 않음" msgid "Signature verification failed" msgstr "서명 검증 실패" msgid "Signature verification failed." msgstr "서명 검증에 실패했습니다." msgid "Size of image file in bytes" msgstr "이미지 파일의 크기(바이트)" msgid "" "Some resource types allow more than one key / value pair per instance. For " "example, Cinder allows user and image metadata on volumes. Only the image " "properties metadata is evaluated by Nova (scheduling or drivers). This " "property allows a namespace target to remove the ambiguity." msgstr "" "일부 자원 유형은 인스턴스 당 둘 이상의 키 / 값 쌍을 허용합니다.예를 들어, " "Cinder는 볼륨에 사용자 및 이미지 메타데이터를 허용합니다. 이미지 특성 메타데" "이터만 Nova(스케줄링 또는 드라이버)에 의해 평가됩니다. 이 특성은 모호성을 제" "거하기 위해 네임스페이스 대상을 허용합니다." msgid "Sort direction supplied was not valid." msgstr "제공된 정렬 방향이 올바르지 않습니다." msgid "Sort key supplied was not valid." msgstr "제공되는 정렬 키가 올바르지 않습니다." msgid "" "Specifies the prefix to use for the given resource type. Any properties in " "the namespace should be prefixed with this prefix when being applied to the " "specified resource type. Must include prefix separator (e.g. a colon :)." msgstr "" "제공된 자원 유형에 사용할 접두부를 지정합니다. 지정된 자원 유형에 적용되는 경" "우 네임스페이스의 모든 특성은 이 접두부로 시작해야 합니다. 접두부 구분 기호" "(예: 콜론 :)를 포함해야 합니다." msgid "Specifies which task executor to be used to run the task scripts." msgstr "" "태스크 스크립트를 실행하는 데 사용할 태스크 실행 프로그램을 지정합니다." 
msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "상태는 \"보류 중\", \"수락됨\" 또는 \"거부됨\"이어야 합니다." msgid "Status not specified" msgstr "상태를 지정하지 않음" msgid "Status of the image" msgstr "이미지의 상태" #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "%(cur_status)s에서 %(new_status)s(으)로의 상태 전이가 허용되지 않음" #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "신호(%(sig)s)와 함께 %(serv)s(pid %(pid)s) 중지 중" #, python-format msgid "Store for image_id not found: %s" msgstr "image_id에 대한 저장소를 찾을 수 없음: %s" #, python-format msgid "Store for scheme %s not found" msgstr "%s 스키마에 대한 저장소를 찾을 수 없음" #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded image " "(%(actual)s) did not match. Setting image status to 'killed'." msgstr "" "제공된 %(attr)s (%(supplied)s) 및 %(attr)s (업로드된 이미지 %(actual)s(으)로" "부터 생성됨)이(가) 일치하지 않음. 이미지 상태를 '강제 종료됨'으로 설정." msgid "Supported values for the 'container_format' image attribute" msgstr "'container_format' 이미지 속성에 대해 지원되는 값" msgid "Supported values for the 'disk_format' image attribute" msgstr "'disk_format' 이미지 속성에 대해 지원되는 값" #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "%(serv)s이(가) %(rsn)s이므로 재파생이 억제되었습니다." msgid "System SIGHUP signal received." msgstr "시스템 SIGHUP 신호를 수신했습니다." #, python-format msgid "Task '%s' is required" msgstr "태스크 '%s'이(가) 필요함" msgid "Task does not exist" msgstr "태스크가 존재하지 않음" msgid "Task failed due to Internal Error" msgstr "내부 오류로 인해 태스크 실패" msgid "Task was not configured properly" msgstr "태스크가 제대로 구성되지 않음" #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "지정된 ID가 %(task_id)s인 태스크를 찾을 수 없음" msgid "The \"changes-since\" filter is no longer available on v2." msgstr "\"changes-since\" 필터는 v2에서 더 이상 사용할 수 없습니다." 
#, python-format msgid "The CA file you specified %s does not exist" msgstr "사용자가 지정한 CA 파일 %s이(가) 존재하지 않음" #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is no " "longer in valid status for further processing." msgstr "" "이 태스크 %(task_id)s에서 작성 중인 이미지 %(image_id)s 오브젝트는 더 이상 향" "후 처리에 사용할 수 있는 올바른 상태가 아닙니다." msgid "The Store URI was malformed." msgstr "저장소 URI의 형식이 올바르지 않습니다." msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect and " "using keystone auth, then URL of keystone can be specified." msgstr "" "키스톤 서비스에 대한 URL입니다. \"use_user_token\"이(가) 적용되지 않는 경우 " "키스톤 권한을 사용한 다음 키스톤의 URL을 지정할 수 있습니다." msgid "" "The address where the Swift authentication service is listening.(deprecated)" msgstr "Swift 인증 서비스가 청취 중인 주소입니다.(더 이상 사용되지 않음)" msgid "" "The administrators password. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "관리자 비밀번호입니다. \"use_user_token\"이(가) 적용되지 않는 경우 관리 신임 " "정보를 지정할 수 있습니다." msgid "" "The administrators user name. If \"use_user_token\" is not in effect, then " "admin credentials can be specified." msgstr "" "관리 사용자 이름입니다. \"use_user_token\"이(가) 적용되지 않는 경우 관리 신" "임 정보를 지정할 수 있습니다." msgid "The amount of time in seconds to delay before performing a delete." msgstr "삭제를 수행하기 전에 지연되는 시간(초)입니다." msgid "" "The amount of time to let an incomplete image remain in the cache, before " "the cache cleaner, if running, will remove the incomplete image." msgstr "" "캐시 클리너가 실행 중인 경우 불완전한 이미지를 제거하기 전에 불완전한 이미지" "가 캐시에 남아 있을 수 있는 시간입니다." msgid "" "The backlog value that will be used when creating the TCP listener socket." msgstr "TCP 리스너 소켓을 작성할 때 사용하는 백로그 값입니다." #, python-format msgid "The cert file you specified %s does not exist" msgstr "사용자가 지정한 인증 파일 %s이(가) 존재하지 않음" msgid "The config file that has the swift account(s)configs." msgstr "swift 계정 구성이 있는 구성 파일입니다." 
msgid "The current status of this task" msgstr "이 태스크의 현재 상태" #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not " "support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the cache " "directory." msgstr "" "디바이스 하우징 이미지 캐시 디렉터리 %(image_cache_dir)s의 Device 는 xattr을 " "지원하지 않습니다. fstab을 수정하거나 user_xattr 옵션을 디바이스 하우징 캐시 " "디렉터리의 적합한 행에 추가하기 바랍니다." msgid "The driver to use for image cache management." msgstr "이미지 캐시를 관리하는 데 사용되는 드라이버입니다." #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "버전 %s의 형식이 올바르지 않습니다. semver 표기법을 사용하십시오." msgid "" "The format to which images will be automatically converted. When using the " "RBD backend, this should be set to 'raw'" msgstr "" "이미지가 자동으로 변환될 형식입니다. RBD 백엔드를 사용할 때 'raw'로 설정해야 " "합니다." #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following " "list of supported uri %(supported)s" msgstr "" "제공된 uri가 올바르지 않습니다. 다음 지원 uri 목록에서 올바른 uri를 지정하십" "시오. %(supported)s" msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "디버그 연결을 청취하는 pydev 프로세스의 hostname/IP" #, python-format msgid "" "The image %s is already present on the slave, but our check for it did not " "find it. This indicates that we do not have permissions to see all the " "images on the slave server." msgstr "" "이미지 %s이(가) 이미 슬레이브에 존재하지만 해당 이미지를 검색할 때 발견하지 " "못했습니다. 슬레이브 서버에 있는 이미지 전체를 볼 수 있는 권한이 없기 때문입" "니다." #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "수신 아티팩트 blob이 너무 큼: %s" #, python-format msgid "The incoming image is too large: %s" msgstr "수신 이미지가 너무 큼: %s" #, python-format msgid "The key file you specified %s does not exist" msgstr "사용자가 지정한 키 파일 %s이(가) 존재하지 않음" #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. 
" "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 위치 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for this " "image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "이 이미지에 대해 허용된 이미지 멤버 수의 한계가 초과되었습니다. 시도함: " "%(attempted)s, 최대: %(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 특성 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" "허용된 이미지 특성 수의 한계가 초과되었습니다. 시도함: %(num)s, 최대: " "%(quota)s" #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. Attempted: " "%(attempted)s, Maximum: %(maximum)s" msgstr "" "허용된 이미지 태그 수의 한계가 초과되었습니다. 시도함: %(attempted)s, 최대: " "%(maximum)s" #, python-format msgid "The location %(location)s already exists" msgstr "위치 %(location)s이(가) 이미 있음" #, python-format msgid "The location data has an invalid ID: %d" msgstr "위치 데이터의 ID가 올바르지 않음: %d" msgid "" "The location of the property protection file.This file contains the rules " "for property protections and the roles/policies associated with it. If this " "config value is not specified, by default, property protections won't be " "enforced. If a value is specified and the file is not found, then the glance-" "api service will not start." msgstr "" "특성 보호 파일의 위치. 이 파일에는 특성 보호 규칙 및 해당 규칙과 연관된 역할/" "정책이 포함되어 있습니다. 이 구성 값을 지정하지 않으면 기본적으로 특성 보호" "가 적용되지 않습니다. 값을 지정했지만 파일을 찾을 수 없는 경우에는 glance-" "api 서비스가 시작되지 않습니다." #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" "name=%(record_name)s인 메타데이터 정의 %(record_type)s이(가) 삭제되지 않습니" "다. 
다른 레코드가 여전히 이를 참조합니다."
msgid "" "The number of parallel activities executed at the same time by the engine. " "The value can be greater than one when the engine mode is 'parallel'." msgstr "" "동일한 시간에 엔진에서 실행되는 병렬 활동 수입니다. 엔진 모드가 '병렬'인 경" "우 이 값은 1보다 클 수 있습니다." msgid "The parameters required by task, JSON blob" msgstr "태스크에서 필요로 하는 매개변수, JSON blob" msgid "" "The path to the cert file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE environment " "variable to a filepath of the CA cert file" msgstr "" "레지스트리 서버에 대한 SSL 연결에 사용할 인증 파일에 대한 경로(있는 경우). 또" "는 GLANCE_CLIENT_CERT_FILE 환경 변수를 CA 인증 파일의 파일 경로로 설정할 수 " "있습니다." msgid "" "The path to the certifying authority cert file to use in SSL connections to " "the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert file." msgstr "" "레지스트리 서버에 대한 SSL 연결에서 사용할 권한 인증 파일을 인증하는 경로(있" "는 경우). 또는 GLANCE_CLIENT_CA_FILE 환경 변수를 CA 인증 파일의 파일 경로로 " "설정할 수 있습니다." msgid "" "The path to the key file to use in SSL connections to the registry server, " "if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE environment " "variable to a filepath of the key file" msgstr "" "레지스트리 서버에 대한 SSL 연결에서 사용할 키 파일의 경로(있는 경우). 또는 " "GLANCE_CLIENT_KEY_FILE 환경 변수를 키 파일의 파일 경로로 설정할 수 있습니다." msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "이미지 캐시 관리에 사용되는 sqlite 파일 데이터베이스에 대한경로." msgid "" "The period of time, in seconds, that the API server will wait for a registry " "request to complete. A value of 0 implies no timeout." msgstr "" "레지스트리 요청을 완료하도록 API 서버가 대기하는 시간(초)입니다. 0 값은 제한" "시간 초과를 의미합니다." msgid "The port on which a pydev process is listening for connections." msgstr "pydev 프로세스가 연결을 청취하는 포트입니다." msgid "The port on which the server will listen." msgstr "서버가 청취할 포트입니다." msgid "" "The protocol to use for communication with the registry server. Either http " "or https." 
msgstr "레지스트리 서버와의 통신을 위해 사용할 프로토콜. http 또는 https." #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "제공된 본문 %(body)s이(가) 지정된 스키마에서 올바르지 않음: %(schema)s" msgid "The provided image is too large." msgstr "제공된 이미지가 너무 큽니다." #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "제공된 경로 '%(path)s'이(가) 올바르지 않음: %(explanation)s" msgid "" "The reference to the default swift account/backing store parameters to use " "for adding new images." msgstr "" "새 이미지를 추가하는 데 사용할 기본 swift 계정/백업 저장소 매개변수에 대한 참" "조입니다." msgid "" "The region for the authentication service. If \"use_user_token\" is not in " "effect and using keystone auth, then region name can be specified." msgstr "" "인증 서비스에 대한 리젼입니다. If \"use_user_token\"이(가) 적용되지 않는 경" "우 키스톤 권한을 사용한 다음 리젼 이름을 지정할 수 있습니다." msgid "The request returned 500 Internal Server Error." msgstr "요청 시 500 내부 서버 오류가 리턴되었습니다." msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" "요청에서 '503 서비스 사용 불가능'을 리턴했습니다. 이는 일반적으로 서비스 과부" "하나 기타 일시적 정전일 경우 발생합니다." #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that you " "have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" "요청이 302 다중 선택사항을 리턴했습니다. 이는 일반적으로 요청 URI에 버전 표시" "기를 포함하지 않았음을 의미합니다.\n" "\n" "리턴된 응답의 본문:\n" "%(body)s" #, python-format msgid "" "The request returned a 413 Request Entity Too Large. This generally means " "that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "요청에서 '413 요청 엔티티가 너무 큼'을 리턴했습니다. 
이는 일반적으로 등급 한" "도나 할당량 임계값을 위반했음을 의미합니다.\n" "\n" "응답 본문:\n" "%(body)s" #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" "요청이 예상치 않은 상태를 리턴함: %(status)s.\n" "\n" "응답 본문:\n" "%(body)s" msgid "" "The requested image has been deactivated. Image data download is forbidden." msgstr "" "요청된 이미지가 비활성화되었습니다. 이미지 데이터 다운로드가 금지됩니다." msgid "The result of current task, JSON blob" msgstr "현재 태스크의 결과, JSON blob" msgid "The signature data was not properly encoded using base64" msgstr "base64를 사용하여 서명 데이터가 올바르게 인코딩되지 않음" #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" "데이터 크기 %(image_size)s이(가) 남은 한도 바이트 %(remaining)s을(를) 초과합" "니다." msgid "" "The size of thread pool to be used for scrubbing images. The default is one, " "which signifies serial scrubbing. Any value above one indicates the max " "number of images that may be scrubbed in parallel." msgstr "" "이미지 삭제에 사용될 스레드 풀의 크기입니다. 기본값은 1이며 이는직렬 삭제를 " "나타냅니다. 1보다 큰 값은 병렬로 삭제될 수 있는 최대 이미지 수를 표시합니다." 
#, python-format msgid "The specified member %s could not be found" msgstr "지정된 멤버 %s을(를) 찾을 수 없음" #, python-format msgid "The specified metadata object %s could not be found" msgstr "지정된 메타데이터 오브젝트 %s을(를) 찾을 수 없음" #, python-format msgid "The specified metadata tag %s could not be found" msgstr "지정된 메타데이터 태그 %s을(를) 찾을 수 없음" #, python-format msgid "The specified namespace %s could not be found" msgstr "지정된 네임스페이스 %s을(를) 찾을 수 없음" #, python-format msgid "The specified property %s could not be found" msgstr "지정된 특성 %s을(를) 찾을 수 없음" #, python-format msgid "The specified resource type %s could not be found " msgstr "지정된 자원 유형 %s을(를) 찾을 수 없음" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'" msgstr "" "삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" "음" msgid "" "The status of deleted image location can only be set to 'pending_delete' or " "'deleted'." msgstr "" "삭제된 이미지 위치의 상태는 'pending_delete' 또는 'deleted'로만 설정할 수 있" "습니다." msgid "The status of this image member" msgstr "이 이미지 멤버의 상태" msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. This " "option will be applied when you using 'store_type' option as image location " "strategy defined by the 'location_strategy' config option." msgstr "" "저장소 환경 설정 순서를 가져오는 데 사용하는 저장소 이름입니다. 이름은 " "'stores' 구성 옵션에서 정의하는 저장소 중 하나를 사용하여 등록해야 합니다. " "이 옵션은 'store_type' 옵션을 'location_strategy' 구성 옵션에서 정의하는 이미" "지 위치 전략으로 사용할 때 적용됩니다." msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" "인증에 사용할 전략입니다. If \"use_user_token\"이(가) 적용되지 않는 경우인증 " "전략을 지정할 수 있습니다." #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." msgstr "대상 멤버 %(member_id)s이(가) 이미 이미지 %(image_id)s." msgid "" "The tenant name of the administrative user. 
If \"use_user_token\" is not in " "effect, then admin tenant name can be specified." msgstr "" "관리 사용자의 테넌트 이름입니다. \"use_user_token\"이(가) 적용되지 않는 경우" "관리 테넌트 이름을 지정할 수 있습니다." msgid "The type of task represented by this content" msgstr "이 컨텐츠에서 나타내는 태스크의 유형" msgid "The unique namespace text." msgstr "고유 네임스페이스 텍스트입니다." msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" "실행 중인 경우 캐시 프루너(pruner)가 이미지 캐시 지우기를 시작하는 상한(누적" "된 최대 캐시 크기(바이트))입니다." msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" "사용자에게 익숙한 네임스페이스의 이름입니다. 가능한 경우 UI에서 사용됩니다." msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "Swift 인증 서비스에 대해 인증할 사용자입니다.(더 이상 사용되지 않음)" msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in seconds " "that the connection must be idle before TCP starts sending keepalive probes." msgstr "" "소켓 옵션 TCP_KEEPIDLE의 값입니다. 이는 TCP가 활성 유지 프로브 전송을 시작하" "기 전에 연결이 유휴되어야 하는 시간(초)입니다." #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. Error: %(ioe)s" msgstr "" "%(error_key_name)s %(error_filename)s에 문제점이 있습니다. 문제점을 확인하십" "시오. 오류: %(ioe)s" #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. Please " "verify it. OpenSSL error: %(ce)s" msgstr "" "%(error_key_name)s %(error_filename)s에 문제점이 있습니다. 문제점을 확인하십" "시오. OpenSSL 오류: %(ce)s" #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" "키 쌍에 문제점이 있습니다. 인증 %(cert_file)s 및 키 %(key_file)s이(가) 함께 " "있는지 확인하십시오. OpenSSL 오류 %(ce)s" msgid "There was an error configuring the client." msgstr "클라이언트 구성 오류가 있었습니다." msgid "There was an error connecting to a server" msgstr "서버 연결 오류가 있었습니다." 
msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in " "the property protection file." msgstr "" "이 구성 값은 \"roles\" 또는 \"policies\"가 특성 보호 파일에서 사용되는지 여부" "를 표시합니다." msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" "해당 동작은 현재 Glance 작업에 대해서는 허용되지 않습니다. 이들은 expires_at " "특성에 기반한 시간에 도달하면 자동으로 삭제됩니다." msgid "This operation is currently not permitted on Glance images details." msgstr "해당 동작은 현재 Glance 이미지 세부사항에 대해서는 허용되지 않습니다." msgid "" "This value sets what strategy will be used to determine the image location " "order. Currently two strategies are packaged with Glance 'location_order' " "and 'store_type'." msgstr "" "이 값은 이미지 위치 순서를 판별하는 데 사용되는 전략을 설정합니다. 현재 두 개" "의 전략이 Glance 'location_order' 및 'store_type'으로 패키징됩니다." msgid "" "Time in hours for which a task lives after, either succeeding or failing" msgstr "이후에 태스크가 활성이 되는 시간(시), 성공 또는 실패" msgid "" "Timeout for client connections' socket operations. If an incoming connection " "is idle for this number of seconds it will be closed. A value of '0' means " "wait forever." msgstr "" "클라이언트 연결의 소켓 조작에 대한 제한시간입니다. 수신 연결이 이 기간(초) 동" "안 유휴 상태이면 연결이 닫힙니다. 값이 '0'이면 무기한 대기합니다." msgid "Too few arguments." msgstr "인수가 너무 적습니다." msgid "Too few locations" msgstr "위치가 너무 적음" msgid "Too many locations" msgstr "위치가 너무 많음" #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "%(size)d 이미지에 걸쳐 전체 크기는 %(img_count)d 바이트임" msgid "Turn on/off delayed delete." msgstr "지연된 삭제를 켜기/끄기입니다." 
msgid "Type version has to be a valid semver string" msgstr "유형 버전은 올바른 semver 문자열이어야 함" msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like swift://user:pass@http://authurl.com/v1/container/obj, " "you need to change it to use the swift+http:// scheme, like so: swift+http://" "user:pass@authurl.com/v1/container/obj" msgstr "" "URI는 스킴의 둘 이상의 발생을 포함할 수 없습니다. 다음과 유사한 URI를 지정한 " "경우 swift://user:pass@http://authurl.com/v1/container/obj, 다음과 같이 swift" "+http:// 스킴을 사용하도록 변경해야 합니다. swift+http://user:pass@authurl." "com/v1/container/obj" msgid "URL to access the image file kept in external store" msgstr "외부 저장소에 보관된 이미지 파일에 액세스하기 위한 URL" msgid "Unable to PUT to non-empty collection" msgstr "비어 있지 않은 콜렉션에 대해 PUT을 수행할 수 없음" #, python-format msgid "" "Unable to create pid file %(pid)s. Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" "pid 파일 %(pid)s을(를) 작성할 수 없습니다. 비루트로 실행 중인지 확인하십시" "오.\n" "임시 파일로 돌아가 다음을 사용하여 %(service)s 서비스를 중지할 수 있습니다.\n" " %(file)s %(server)s stop --pid-file %(fb)s" #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "알 수 없는 연산자 '%s'(으)로 필터링할 수 없습니다." msgid "Unable to filter on a range with a non-numeric value." msgstr "숫자가 아닌 값을 사용하여 범위에서 필터링할 수 없습니다." msgid "Unable to filter on a unknown operator." msgstr "알 수 없는 연산자를 필터링할 수 없습니다." msgid "Unable to filter using the specified operator." msgstr "지정된 연산자를 사용하여 필터링할 수 없습니다." msgid "Unable to filter using the specified range." msgstr "지정된 범위를 사용하여 필터링할 수 없습니다." #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "JSON 스키마 변경에서 '%s'을(를) 찾을 수 없음" #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the following: " "%(available)s." msgstr "" "JSON 스키마 변경에서 `op`를 찾을 수 없습니다. 다음 중 하나여야 합니다. " "%(available)s." 
msgid "Unable to get legacy image" msgstr "레거시 이미지를 가져올 수 없음" msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" "파일 디스크립터 한계를 늘릴 수 없습니다. 비루트로 실행 중인지 확인하십시오." #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" "구성 파일 %(conf_file)s에서 %(app_name)s을(를) 로드할 수 없습니다.\n" "오류 발생: %(e)r" #, python-format msgid "Unable to load schema: %(reason)s" msgstr "스키마를 로드할 수 없음: %(reason)s" #, python-format msgid "Unable to locate paste config file for %s." msgstr "%s에 대한 붙여넣기 구성 파일을 찾을 수 없습니다." msgid "Unable to modify collection in immutable or readonly property" msgstr "불변 특성 또는 읽기 전용 특성의 콜렉션을 수정할 수 없음" #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "ID로 인증서를 검색할 수 없음: %s" msgid "Unable to retrieve request id from context" msgstr "컨텍스트에서 요청 ID를 검색할 수 없음" msgid "Unable to specify artifact type explicitly" msgstr "아티팩트 유형을 명시적으로 지정할 수 없음" msgid "Unable to specify artifact type version explicitly" msgstr "아티팩트 유형 버전을 명시적으로 지정할 수 없음" msgid "Unable to specify version if multiple types are possible" msgstr "유형이 여러 개인 경우 버전을 지정할 수 없음" msgid "Unable to specify version if type is not specified" msgstr "유형을 지정하지 않은 경우 버전을 지정할 수 없음" #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" "이미지 %(image_id)s에 대한 중복 이미지 데이터를 업로드할 수 없음: %(error)s" msgid "" "Unable to verify signature since the algorithm is unsupported on this system" msgstr "이 시스템에서 알고리즘이 지원되지 않으므로 서명을 확인할 수 없음" #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "서명을 확인할 수 없음: %(reason)s" msgid "Unauthorized image access" msgstr "권한 없는 이미지 액세스" msgid "Unexpected body type. Expected list/dict." msgstr "예기치않은 본문 타입. list/dict를 예상합니다." 
#, python-format msgid "Unexpected response: %s" msgstr "예상치 않은 응답: %s" #, python-format msgid "Unknown auth strategy '%s'" msgstr "알 수 없는 auth 전략 '%s'" #, python-format msgid "Unknown command: %s" msgstr "알 수 없는 명령: %s" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "알 수 없는 정렬 방향입니다. 'desc' 또는 'asc'여야 함" msgid "Unrecognized JSON Schema draft version" msgstr "인식되지 않는 JSON 스키마 드래프트 버전" msgid "Unrecognized changes-since value" msgstr "인식되지 않는 changes-since 값" #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "지원되지 않는 sort_dir. 허용 가능한 값: %s" #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "지원되지 않는 sort_key. 허용 가능한 값: %s" #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "값 %(value)d이(가) 범위를 벗어남, %(max)d을(를) 초과해서는 안 됨" msgid "Value is greater than maximum" msgstr "값이 최대값보다 큼" msgid "Value is less than minimum" msgstr "값이 최소값보다 작음" msgid "Value is required" msgstr "값이 필요함" #, python-format msgid "Version component is too large (%d max)" msgstr "버전 컴포넌트가 너무 큼(최대 %d)" #, python-format msgid "Version is invalid: %(reason)s" msgstr "버전이 올바르지 않음: %(reason)s" msgid "Virtual size of image in bytes" msgstr "이미지의 가상 크기(바이트)" msgid "Visibility must be either \"public\" or \"private\"" msgstr "가시성은 \"공용\" 또는 \"개인용\" 중 하나여야 합니다." #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "pid %(pid)s(%(file)s)이 종료될 때까지 15초 대기함, 포기하는 중" msgid "" "When false, no artifacts can be loaded regardless of available_plugins. When " "true, artifacts can be loaded." msgstr "" "false이면 available_plugins와 관계 없이 아티팩트를 로드할 수 없습니다.true이" "면 아티팩트를 로드할 수 있습니다." msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" "서버를 SSL 모드에서 실행할 때 구성 파일에 cert_file 및 key_file 옵션 값을 모" "두 지정해야 함" msgid "" "When true, this option sets the owner of an image to be the tenant. 
" "Otherwise, the owner of the image will be the authenticated user issuing " "the request." msgstr "" "true인 경우 이 옵션은 이미지의 소유자를 테넌트로 설정합니다. 그렇지 않은 경" "우 이미지의 소유자는 요청을 발행한 인증된사용자가 됩니다." msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent of " "specifying --insecure on the command line using glanceclient for the API." msgstr "" "레지스트리 서버에 대한 연결에서 SSL을 사용할 때 권한 인증을 통한 유효성 검증" "이 필요하지 않습니다. 이는 API의 glanceclient를 사용하여 명령행에서 --" "insecure를 지정하는 레지스트리와 자원입니다." msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" "이미지 스키마가 제공하는 것 이상으로 이미지 특성을 지정할 수 있도록 허용할지" "의 여부" msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of \"file:///path/to/image\" " "will be returned to the user in the 'direct_url' meta-data field. Revealing " "storage location can be a security risk, so use this setting with caution! " "Setting this to true overrides the show_image_direct_url option." msgstr "" "이미지 특성에 백엔드 이미지 위치를 포함시킬지의 여부. 예를 들어 파일 시스템 " "저장소를 사용하는 경우 URL\"file:///path/to/image\"가 사용자에게 " "'direct_url' 메타데이터 필드로 리턴됩니다. 스토리지 위치를 드러내면 보안 위험" "이 있을 수 있으므로 이 설정을 주의하여 사용하십시오! 이 값을 true로 설정하면 " "show_image_direct_url 옵션을 대체합니다." msgid "" "Whether to include the backend image storage location in image properties. " "Revealing storage location can be a security risk, so use this setting with " "caution!" msgstr "" "이미지 특성에 백엔드 이미지 스토리지 위치를 포함시킬지의 여부. 스토리지 위치" "를 드러낼 경우 보안상 위험할 수 있으므로이 설정은 주의하여 사용하십시오." msgid "" "Whether to pass through headers containing user and tenant information when " "making requests to the registry. This allows the registry to use the context " "middleware without keystonemiddleware's auth_token middleware, removing " "calls to the keystone auth service. 
It is recommended that when using this " "option, secure communication between glance api and glance registry is " "ensured by means other than auth_token middleware." msgstr "" "레지스트리에 대한 요청을 작성할 때 사용자 및 테넌트 정보를 포함하는 헤더를 전" "달할지 여부를 통해 레지스트리는 keystone 인증 서비스에 대한 호출을 제거하면" "서 keystonemiddleware의 auth_token 미들웨어 없이 컨텍스트 미들웨어를 사용할 " "수 있습니다. 이 옵션을 사용할 경우 auth_token 미들웨어 외의 다른 수단을 통해 " "glance api 및 glance 레지스트리 간의 통신을 보호해야 합니다." msgid "" "Whether to pass through the user token when making requests to the registry. " "To prevent failures with token expiration during big files upload, it is " "recommended to set this parameter to False.If \"use_user_token\" is not in " "effect, then admin credentials can be specified." msgstr "" "레지스트리에 대해 요청을 작성할 때 사용자 토큰을 통과할지 여부입니다. 큰 파일" "을 업로드하는 동안 토큰 만기에 대한 실패를 방지하려면 이 매개변수를 False로 " "설정하는 것이 좋습니다. \"use_user_token\"이 적용되지 않은 경우 관리 신임 정" "보를 지정할 수 있습니다." msgid "" "Work dir for asynchronous task operations. The directory set here will be " "used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size (e.g " "500MB). The image size estimation should be done based on the average size " "in your deployment. Note that depending on the tasks running you may need to " "multiply this number by some factor depending on what the task does. For " "example, you may want to double the available size if image conversion is " "enabled. All this being said, remember these are just estimations and you " "should do them based on the worst case scenario and be prepared to act in " "case they were wrong." msgstr "" "비동기 태스크 조작을 위한 작업 디렉토리입니다. 이 디렉토리 세트는이미지에서 " "작업하는 데 사용됩니다. 일반적으로 대상 저장소에서 가져오기 전에 사용됩니다. 
" "작업 디렉토리를 제공하는 경우 공간 외부에서 실행하지 않고도 동시 태스크를 효" "율적으로 실행하기에 충분한 공간이 제공되었는지 확인하십시오. `max_workers` 수" "(또는 실행 중인 작업자 수 N)에 평균 이미지 크기(예: 500MB)를 곱하여 대략의 추" "정값을 계산할 수 있습니다. 이미지 크기 추정은 배치의 평균 크기를 기반으로 하" "여 수행되어야 합니다. 실행 중인 태스크에 따라 이 수를 태스크 수행 항목에 따" "른 여러 요인으로 곱해야 할 수 있습니다. 예를 들어 이미지 변환이 사용으로 설정" "된 경우 사용 가능 크기를 두 배로 하려고 할 수 있습니다. 여기서는 추정일 뿐이" "며,최악의 시나리오를 기반으로 하여 이를 수행해야 하고 잘못되는 경우에 대비하" "여 조치를 준비해야 함을 기억하십시오." #, python-format msgid "Wrong command structure: %s" msgstr "잘못된 명령 구조: %s" msgid "You are not authenticated." msgstr "인증되지 않은 사용자입니다." msgid "You are not authorized to complete this action." msgstr "이 조치를 완료할 권한이 없습니다." #, python-format msgid "You are not authorized to lookup image %s." msgstr "이미지 %s을(를) 검색할 권한이 없습니다." #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "이미지 %s의 멤버를 검색할 권한이 없습니다." #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "'%s' 소유의 네임스페이스에 태그를 작성할 권한이 없습니다." msgid "You are not permitted to create image members for the image." msgstr "이미지에 대한 이미지 멤버를 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "'%s' 소유의 이미지를 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "'%s' 소유의 네임스페이스를 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "'%s' 소유의 오브젝트를 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "'%s' 소유의 특성을 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "'%s' 소유의 resource_type을 작성할 권한이 없습니다." #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "다음 소유자로 이 태스크를 작성하도록 허용되지 않았습니다. %s" msgid "You are not permitted to deactivate this image." msgstr "이 이미지를 비활성화할 권한이 없습니다." msgid "You are not permitted to delete this image." 
msgstr "이 이미지를 삭제할 권한이 없습니다." msgid "You are not permitted to delete this meta_resource_type." msgstr "이 meta_resource_type을 삭제할 권한이 없습니다." msgid "You are not permitted to delete this namespace." msgstr "이 네임스페이스를 삭제할 권한이 없습니다." msgid "You are not permitted to delete this object." msgstr "이 오브젝트를 삭제할 권한이 없습니다." msgid "You are not permitted to delete this property." msgstr "이 특성을 삭제할 권한이 없습니다." msgid "You are not permitted to delete this tag." msgstr "이 태그를 삭제할 권한이 없습니다." #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "이 %(resource)s에서 '%(attr)s'을(를) 수정하도록 허용되지 않았습니다." #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "이 이미지에서 '%s'을(를) 수정할 권한이 없습니다." msgid "You are not permitted to modify locations for this image." msgstr "이 이미지의 위치를 수정할 권한이 없습니다." msgid "You are not permitted to modify tags on this image." msgstr "이 이미지의 태그를 수정할 권한이 없습니다." msgid "You are not permitted to modify this image." msgstr "이 이미지를 수정할 권한이 없습니다." msgid "You are not permitted to reactivate this image." msgstr "이 이미지를 재활성화할 권한이 없습니다." msgid "You are not permitted to set status on this task." msgstr "이 태스크에서 상태를 설정하도록 허용되지 않았습니다." msgid "You are not permitted to update this namespace." msgstr "이 네임스페이스를 업데이트할 권한이 없습니다." msgid "You are not permitted to update this object." msgstr "이 오브젝트를 업데이트할 권한이 없습니다." msgid "You are not permitted to update this property." msgstr "이 특성을 업데이트할 권한이 없습니다." msgid "You are not permitted to update this tag." msgstr "이 태그를 업데이트할 권한이 없습니다." msgid "You are not permitted to upload data for this image." msgstr "이 이미지에 대한 데이터를 작성할 권한이 없습니다." 
#, python-format msgid "You cannot add image member for %s" msgstr "%s에 대한 이미지 멤버를 추가할 수 없음" #, python-format msgid "You cannot delete image member for %s" msgstr "%s에 대한 이미지 멤버를 삭제할 수 없음" #, python-format msgid "You cannot get image member for %s" msgstr "%s에 대한 이미지 멤버를 가져올 수 없음" #, python-format msgid "You cannot update image member %s" msgstr "이미지 멤버 %s을(를) 업데이트할 수 없음" msgid "You do not own this image" msgstr "이 이미지를 소유하지 않음" msgid "" "You have selected to use SSL in connecting, and you have supplied a cert, " "however you have failed to supply either a key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" "연결에 SSL을 사용하도록 선택하고 인증을 제공했지만 key_file 매개변수를 제공하" "지 못했거나 GLANCE_CLIENT_KEY_FILE 환경 변수를 설정하지 못했습니다." msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" "연결에 SSL을 사용하도록 선택하고 키를 제공했지만 cert_file 매개변수를 제공하" "지 못했거나 GLANCE_CLIENT_CERT_FILE 환경 변수를 설정하지 못했습니다." 
msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" msgstr "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-" "fA-F]){12}$" #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "__init__()가 예상치 못한 키워드 인수 '%s'을(를) 가져옴" #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted from_state=" "%(from)s)" msgstr "" "업데이트에서 %(current)s에서 %(next)s(으)로 상태 전이할 수 (from_state=" "%(from)s을(를) 원함)" #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "사용자 정의 특성 (%(props)s)이(가) 기본 특성과 충돌함" msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "이 플랫폼에서 eventlet 'poll'이나 'selects' 허브를 모두 사용할 수 없음" msgid "is_public must be None, True, or False" msgstr "is_public은 None, True 또는 False여야 함" msgid "limit param must be an integer" msgstr "limit 매개변수는 정수여야 함" msgid "limit param must be positive" msgstr "limit 매개변수가 양수여야 함" #, python-format msgid "location: %s data lost" msgstr "위치: %s 데이터 유실" msgid "md5 hash of image contents." msgstr "이미지 컨텐츠의 md5 해시입니다." #, python-format msgid "new_image() got unexpected keywords %s" msgstr "new_image()가 예상치 못한 키워드 %s을(를) 가져옴" msgid "protected must be True, or False" msgstr "protected는 True 또는 False여야 함" #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "%(serv)s을(를) 실행할 수 없음. 오류 발생: %(e)s" #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "x-openstack-request-id가 너무 김, 최대 크기 %s" glance-12.0.0/glance/locale/glance.pot0000664000567000056710000026520412701407047020644 0ustar jenkinsjenkins00000000000000# Translations template for glance. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the glance project. # FIRST AUTHOR , 2016. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: glance 12.0.0.0b4.dev41\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-12 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: glance/location.py:101 msgid "Invalid location" msgstr "" #: glance/location.py:331 msgid "Invalid locations" msgstr "" #: glance/location.py:341 #, python-format msgid "Original locations is not empty: %s" msgstr "" #: glance/location.py:438 msgid "Signature verification failed" msgstr "" #: glance/location.py:479 #, python-format msgid "Get image %(id)s data failed: %(err)s." msgstr "" #: glance/notifier.py:398 glance/api/v1/upload_utils.py:226 #: glance/api/v2/image_data.py:197 #, python-format msgid "Image storage media is full: %s" msgstr "" #: glance/notifier.py:403 glance/api/v1/upload_utils.py:236 #: glance/api/v2/image_data.py:221 #, python-format msgid "Insufficient permissions on image storage media: %s" msgstr "" #: glance/notifier.py:408 #, python-format msgid "Cannot save data for image %(image_id)s: %(error)s" msgstr "" #: glance/notifier.py:415 #, python-format msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" msgstr "" #: glance/notifier.py:422 #, python-format msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" msgstr "" #: glance/notifier.py:430 #, python-format msgid "" "Image %(image_id)s could not be found after upload. 
The image may have " "been deleted during the upload: %(error)s" msgstr "" #: glance/notifier.py:438 #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to HTTP error: " "%(error)s" msgstr "" #: glance/notifier.py:445 #, python-format msgid "" "Failed to upload image data for image %(image_id)s due to internal error:" " %(error)s" msgstr "" #: glance/schema.py:79 #, python-format msgid "custom properties (%(props)s) conflict with base properties" msgstr "" #: glance/scrubber.py:37 msgid "The amount of time in seconds to delay before performing a delete." msgstr "" #: glance/scrubber.py:40 msgid "" "The size of thread pool to be used for scrubbing images. The default is " "one, which signifies serial scrubbing. Any value above one indicates the " "max number of images that may be scrubbed in parallel." msgstr "" #: glance/scrubber.py:46 msgid "Turn on/off delayed delete." msgstr "" #: glance/scrubber.py:48 glance/api/middleware/context.py:34 msgid "Role used to identify an authenticated user as administrator." msgstr "" #: glance/scrubber.py:51 glance/registry/client/v1/api.py:34 msgid "" "Whether to pass through headers containing user and tenant information " "when making requests to the registry. This allows the registry to use the" " context middleware without keystonemiddleware's auth_token middleware, " "removing calls to the keystone auth service. It is recommended that when " "using this option, secure communication between glance api and glance " "registry is ensured by means other than auth_token middleware." msgstr "" #: glance/scrubber.py:64 msgid "Loop time between checking for new items to schedule for delete." msgstr "" #: glance/scrubber.py:72 msgid "" "Run as a long-running process. When not specified (the default) run the " "scrub operation once and then exits. When specified do not exit and run " "scrub on wakeup_time interval as specified in the config." 
msgstr "" #: glance/api/authorization.py:131 glance/api/authorization.py:224 msgid "Public images do not have members." msgstr "" #: glance/api/authorization.py:140 glance/api/authorization.py:151 #, python-format msgid "You cannot get image member for %s" msgstr "" #: glance/api/authorization.py:159 #, python-format msgid "You cannot delete image member for %s" msgstr "" #: glance/api/authorization.py:168 #, python-format msgid "You cannot add image member for %s" msgstr "" #: glance/api/authorization.py:177 #, python-format msgid "You cannot update image member %s" msgstr "" #: glance/api/authorization.py:196 #, python-format msgid "You are not permitted to create images owned by '%s'." msgstr "" #: glance/api/authorization.py:219 msgid "You are not permitted to create image members for the image." msgstr "" #: glance/api/authorization.py:240 #, python-format msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." msgstr "" #: glance/api/authorization.py:250 msgid "You are not permitted to modify locations for this image." msgstr "" #: glance/api/authorization.py:274 #, python-format msgid "You are not permitted to modify '%s' on this image." msgstr "" #: glance/api/authorization.py:278 msgid "You are not permitted to modify this image." msgstr "" #: glance/api/authorization.py:291 msgid "You are not permitted to modify tags on this image." msgstr "" #: glance/api/authorization.py:331 msgid "You are not permitted to delete this image." msgstr "" #: glance/api/authorization.py:338 msgid "You are not permitted to upload data for this image." msgstr "" #: glance/api/authorization.py:342 msgid "You are not permitted to deactivate this image." msgstr "" #: glance/api/authorization.py:346 msgid "You are not permitted to reactivate this image." msgstr "" #: glance/api/authorization.py:383 glance/api/authorization.py:387 #: glance/api/authorization.py:391 msgid "You are not permitted to set status on this task." 
msgstr "" #: glance/api/authorization.py:450 #, python-format msgid "You are not permitted to create this task with owner as: %s" msgstr "" #: glance/api/authorization.py:515 msgid "You are not permitted to delete this namespace." msgstr "" #: glance/api/authorization.py:519 msgid "You are not permitted to update this namespace." msgstr "" #: glance/api/authorization.py:545 #, python-format msgid "You are not permitted to create namespace owned by '%s'" msgstr "" #: glance/api/authorization.py:604 msgid "You are not permitted to delete this object." msgstr "" #: glance/api/authorization.py:608 msgid "You are not permitted to update this object." msgstr "" #: glance/api/authorization.py:633 #, python-format msgid "You are not permitted to create object owned by '%s'" msgstr "" #: glance/api/authorization.py:696 msgid "You are not permitted to delete this meta_resource_type." msgstr "" #: glance/api/authorization.py:722 #, python-format msgid "You are not permitted to create resource_type owned by '%s'" msgstr "" #: glance/api/authorization.py:780 msgid "You are not permitted to delete this property." msgstr "" #: glance/api/authorization.py:784 msgid "You are not permitted to update this property." msgstr "" #: glance/api/authorization.py:809 #, python-format msgid "You are not permitted to create property owned by '%s'" msgstr "" #: glance/api/authorization.py:867 msgid "You are not permitted to delete this tag." msgstr "" #: glance/api/authorization.py:871 msgid "You are not permitted to update this tag." msgstr "" #: glance/api/authorization.py:892 msgid "Owner must be specified to create a tag." 
msgstr "" #: glance/api/authorization.py:895 #, python-format msgid "You are not permitted to create a tag in the namespace owned by '%s'" msgstr "" #: glance/api/common.py:65 #, python-format msgid "Corrupt image download for image %(image_id)s" msgstr "" #: glance/api/policy.py:159 glance/quota/__init__.py:357 #, python-format msgid "Invalid locations: %s" msgstr "" #: glance/api/versions.py:27 glance/api/glare/versions.py:28 msgid "" "Public url to use for versions endpoint. The default is None, which will " "use the request's host_url attribute to populate the URL base. If Glance " "is operating behind a proxy, you will want to change this to represent " "the proxy's URL." msgstr "" #: glance/api/glare/v0_1/glare.py:121 #, python-format msgid "The format of the version %s is not valid. Use semver notation" msgstr "" #: glance/api/glare/v0_1/glare.py:188 #, python-format msgid "Invalid Content-Type for work with %s" msgstr "" #: glance/api/glare/v0_1/glare.py:225 msgid "Invalid request PATCH for work with blob" msgstr "" #: glance/api/glare/v0_1/glare.py:239 #, python-format msgid "Denying attempt to upload artifact because it exceeds the quota: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:263 #, python-format msgid "Failed to find artifact %(artifact_id)s to delete" msgstr "" #: glance/api/glare/v0_1/glare.py:267 #, python-format msgid "Artifact %s could not be deleted because it is in use: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:296 msgid "Unable to PUT to non-empty collection" msgstr "" #: glance/api/glare/v0_1/glare.py:299 msgid "Index is out of range" msgstr "" #: glance/api/glare/v0_1/glare.py:322 #, python-format msgid "Not a blob property '%s'" msgstr "" #: glance/api/glare/v0_1/glare.py:330 #, python-format msgid "Not a list property '%s'" msgstr "" #: glance/api/glare/v0_1/glare.py:344 msgid "Error in store configuration. Adding artifacts to store is disabled." 
msgstr "" #: glance/api/glare/v0_1/glare.py:366 #, python-format msgid "Artifact storage media is full: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:374 #, python-format msgid "Artifact exceeds the storage quota: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:382 #, python-format msgid "The incoming artifact blob is too large: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:390 #, python-format msgid "Insufficient permissions on artifact storage media: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:423 msgid "Index is required" msgstr "" #: glance/api/glare/v0_1/glare.py:431 msgid "Not a list property" msgstr "" #: glance/api/glare/v0_1/glare.py:435 msgid "Not a downloadable entity" msgstr "" #: glance/api/glare/v0_1/glare.py:529 glance/api/v2/image_members.py:242 #: glance/api/v2/images.py:365 glance/api/v2/metadef_namespaces.py:407 #: glance/api/v2/metadef_objects.py:237 glance/api/v2/metadef_properties.py:183 #: glance/api/v2/metadef_resource_types.py:150 #: glance/api/v2/metadef_tags.py:269 glance/api/v2/tasks.py:151 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:144 msgid "Body expected in request." msgstr "" #: glance/api/glare/v0_1/glare.py:577 #, python-format msgid "Invalid headers \"Content-Type\": %s" msgstr "" #: glance/api/glare/v0_1/glare.py:615 #, python-format msgid "Invalid json body: %s" msgstr "" #: glance/api/glare/v0_1/glare.py:628 msgid "Only list indexes are allowed for blob lists" msgstr "" #: glance/api/glare/v0_1/glare.py:649 msgid "Limit param must be an integer" msgstr "" #: glance/api/glare/v0_1/glare.py:653 msgid "Limit param must be positive" msgstr "" #: glance/api/glare/v0_1/glare.py:657 #, python-format msgid "Limit param must not be higher than %d" msgstr "" #: glance/api/glare/v0_1/glare.py:667 #, python-format msgid "" "Invalid sort key: %(sort_key)s. If type version is not set it must be one" " of the following: %(available)s." 
msgstr "" #: glance/api/glare/v0_1/glare.py:678 #, python-format msgid "Invalid sort key: %(sort_key)s. You cannot sort by this property" msgstr "" #: glance/api/glare/v0_1/glare.py:687 glance/api/v2/images.py:604 #: glance/api/v2/metadef_namespaces.py:444 glance/api/v2/metadef_objects.py:283 #: glance/api/v2/metadef_tags.py:275 glance/api/v2/tasks.py:157 #, python-format msgid "Invalid sort direction: %s" msgstr "" #: glance/api/middleware/context.py:29 msgid "" "When true, this option sets the owner of an image to be the tenant. " "Otherwise, the owner of the image will be the authenticated user issuing" " the request." msgstr "" #: glance/api/middleware/context.py:37 msgid "" "Allow unauthenticated users to access the API with read-only privileges. " "This only applies when using ContextMiddleware." msgstr "" #: glance/api/middleware/context.py:41 msgid "Limits request ID length." msgstr "" #: glance/api/middleware/context.py:55 msgid "Unable to retrieve request id from context" msgstr "" #: glance/api/middleware/context.py:120 msgid "Invalid service catalog json." msgstr "" #: glance/api/middleware/context.py:125 #, python-format msgid "x-openstack-request-id is too long, max size %s" msgstr "" #: glance/api/v1/controller.py:44 glance/api/v1/members.py:78 #, python-format msgid "Image with identifier %s not found" msgstr "" #: glance/api/v1/controller.py:49 msgid "Forbidden image access" msgstr "" #: glance/api/v1/controller.py:61 #, python-format msgid "Image %s is deactivated" msgstr "" #: glance/api/v1/controller.py:66 #, python-format msgid "Image %s is not active" msgstr "" #: glance/api/v1/controller.py:88 #, python-format msgid "Store for image_id not found: %s" msgstr "" #: glance/api/v1/controller.py:93 #, python-format msgid "Data for image_id not found: %s" msgstr "" #: glance/api/v1/images.py:87 #, python-format msgid "Invalid time format for %s." msgstr "" #: glance/api/v1/images.py:102 #, python-format msgid "Invalid disk format '%s' for image." 
msgstr "" #: glance/api/v1/images.py:107 #, python-format msgid "Invalid container format '%s' for image." msgstr "" #: glance/api/v1/images.py:117 msgid "" "Invalid mix of disk and container formats. When setting a disk or " "container format to one of 'aki', 'ari', or 'ami', the container and disk" " formats must match." msgstr "" #: glance/api/v1/images.py:132 #, python-format msgid "Image name too long: %d" msgstr "" #: glance/api/v1/images.py:139 #, python-format msgid "Invalid checksum '%s': can't exceed 32 characters" msgstr "" #: glance/api/v1/images.py:217 #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(num)s, Maximum: %(quota)s" msgstr "" #: glance/api/v1/images.py:239 glance/api/v1/images.py:283 #: glance/api/v1/images.py:317 #, python-format msgid "Property '%s' is protected" msgstr "" #: glance/api/v1/images.py:380 msgid "This operation is currently not permitted on Glance images details." msgstr "" #: glance/api/v1/images.py:433 #, python-format msgid "Bad value passed to filter %(filter)s got %(val)s" msgstr "" #: glance/api/v1/images.py:469 #, python-format msgid "External sources are not supported: '%s'" msgstr "" #: glance/api/v1/images.py:471 msgid "External source should not be empty" msgstr "" #: glance/api/v1/images.py:571 #, python-format msgid "Required store %s is invalid" msgstr "" #: glance/api/v1/images.py:584 #, python-format msgid "Invalid location %s" msgstr "" #: glance/api/v1/images.py:606 #, python-format msgid "An image with identifier %s already exists" msgstr "" #: glance/api/v1/images.py:613 #, python-format msgid "Failed to reserve image. Got error: %s" msgstr "" #: glance/api/v1/images.py:620 msgid "Forbidden to reserve image." msgstr "" #: glance/api/v1/images.py:664 msgid "Content-Type must be application/octet-stream" msgstr "" #: glance/api/v1/images.py:721 #, python-format msgid "Failed to activate image. 
Got error: %s" msgstr "" #: glance/api/v1/images.py:793 msgid "It's invalid to provide multiple image sources." msgstr "" #: glance/api/v1/images.py:828 #, python-format msgid "" "Provided image size must match the stored image size. (provided size: " "%(ps)d, stored size: %(ss)d)" msgstr "" #: glance/api/v1/images.py:847 msgid "Disk format is not specified." msgstr "" #: glance/api/v1/images.py:852 msgid "Container format is not specified." msgstr "" #: glance/api/v1/images.py:958 msgid "Forbidden to update deleted image." msgstr "" #: glance/api/v1/images.py:970 #, python-format msgid "Forbidden to modify '%(key)s' of %(status)s image." msgstr "" #: glance/api/v1/images.py:979 #, python-format msgid "Forbidden to modify '%s' of image." msgstr "" #: glance/api/v1/images.py:997 msgid "Cannot upload to an unqueued image" msgstr "" #: glance/api/v1/images.py:1014 glance/common/scripts/utils.py:87 #, python-format msgid "Invalid location: %s" msgstr "" #: glance/api/v1/images.py:1021 msgid "Attempted to update Location field for an image not in queued status." msgstr "" #: glance/api/v1/images.py:1061 glance/registry/api/v1/images.py:479 #, python-format msgid "Failed to update image metadata. Got error: %s" msgstr "" #: glance/api/v1/images.py:1068 #, python-format msgid "Failed to find image to update: %s" msgstr "" #: glance/api/v1/images.py:1075 #, python-format msgid "Forbidden to update image: %s" msgstr "" #: glance/api/v1/images.py:1083 msgid "Image operation conflicts" msgstr "" #: glance/api/v1/images.py:1114 msgid "Image is protected" msgstr "" #: glance/api/v1/images.py:1121 #, python-format msgid "Forbidden to delete a %s image." msgstr "" #: glance/api/v1/images.py:1128 glance/api/v2/image_members.py:67 #: glance/api/v2/image_tags.py:52 glance/api/v2/image_tags.py:81 #, python-format msgid "Image %s not found." 
msgstr "" #: glance/api/v1/images.py:1163 #, python-format msgid "Failed to find image to delete: %s" msgstr "" #: glance/api/v1/images.py:1170 #, python-format msgid "Forbidden to delete image: %s" msgstr "" #: glance/api/v1/images.py:1177 glance/api/v2/images.py:246 #, python-format msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" msgstr "" #: glance/api/v1/images.py:1201 #, python-format msgid "Store for scheme %s not found" msgstr "" #: glance/api/v1/images.py:1240 glance/api/v1/upload_utils.py:246 #, python-format msgid "Denying attempt to upload image larger than %d bytes." msgstr "" #: glance/api/v1/members.py:42 glance/registry/api/v1/members.py:34 msgid "No authenticated user" msgstr "" #: glance/api/v1/members.py:55 #, python-format msgid "Image with identifier %s has been deleted." msgstr "" #: glance/api/v1/members.py:82 msgid "Unauthorized image access" msgstr "" #: glance/api/v1/members.py:120 glance/common/exception.py:296 #, python-format msgid "" "The limit has been exceeded on the number of allowed image members for " "this image. Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" #: glance/api/v1/upload_utils.py:131 #, python-format msgid "" "Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded " "image (%(actual)s) did not match. Setting image status to 'killed'." msgstr "" #: glance/api/v1/upload_utils.py:179 #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted" " during the upload." msgstr "" #: glance/api/v1/upload_utils.py:195 glance/api/v2/image_data.py:175 msgid "Error in store configuration. Adding images to store is disabled." 
msgstr "" #: glance/api/v1/upload_utils.py:204 #, python-format msgid "Attempt to upload duplicate image: %s" msgstr "" #: glance/api/v1/upload_utils.py:216 #, python-format msgid "Forbidden upload attempt: %s" msgstr "" #: glance/api/v1/upload_utils.py:256 glance/api/v2/images.py:159 #, python-format msgid "Denying attempt to upload image because it exceeds the quota: %s" msgstr "" #: glance/api/v1/upload_utils.py:277 msgid "Client disconnected before sending all data to backend" msgstr "" #: glance/api/v1/upload_utils.py:285 #, python-format msgid "Failed to upload image %s" msgstr "" #: glance/api/v2/image_data.py:139 #, python-format msgid "" "Image %s could not be found after upload. The image may have been deleted" " during the upload, cleaning up the chunks uploaded." msgstr "" #: glance/api/v2/image_data.py:154 #, python-format msgid "" "Authentication error - the token may have expired during file upload. " "Deleting image data for %s." msgstr "" #: glance/api/v2/image_data.py:205 #, python-format msgid "Image exceeds the storage quota: %s" msgstr "" #: glance/api/v2/image_data.py:213 #, python-format msgid "The incoming image is too large: %s" msgstr "" #: glance/api/v2/image_data.py:256 msgid "" "The requested image has been deactivated. Image data download is " "forbidden." msgstr "" #: glance/api/v2/image_members.py:56 #, python-format msgid "Error fetching members of image %(image_id)s: %(inner_msg)s" msgstr "" #: glance/api/v2/image_members.py:71 #, python-format msgid "You are not authorized to lookup image %s." msgstr "" #: glance/api/v2/image_members.py:80 #, python-format msgid "%(m_id)s not found in the member list of the image %(i_id)s." msgstr "" #: glance/api/v2/image_members.py:86 #, python-format msgid "You are not authorized to lookup the members of the image %s." msgstr "" #: glance/api/v2/image_members.py:117 #, python-format msgid "Not allowed to create members for image %s." 
msgstr "" #: glance/api/v2/image_members.py:121 #, python-format msgid "Member %(member_id)s is duplicated for image %(image_id)s" msgstr "" #: glance/api/v2/image_members.py:127 #, python-format msgid "Image member limit exceeded for image %(id)s: %(e)s:" msgstr "" #: glance/api/v2/image_members.py:157 #, python-format msgid "Not allowed to update members for image %s." msgstr "" #: glance/api/v2/image_members.py:161 #, python-format msgid "Incorrect request: %s" msgstr "" #: glance/api/v2/image_members.py:190 #, python-format msgid "Not allowed to list members for image %s." msgstr "" #: glance/api/v2/image_members.py:229 #, python-format msgid "Not allowed to delete members for image %s." msgstr "" #: glance/api/v2/image_members.py:253 msgid "Member to be added not specified" msgstr "" #: glance/api/v2/image_members.py:256 msgid "Member can't be empty" msgstr "" #: glance/api/v2/image_members.py:259 msgid "Expected a member in the form: {\"member\": \"image_id\"}" msgstr "" #: glance/api/v2/image_members.py:269 msgid "Status not specified" msgstr "" #: glance/api/v2/image_members.py:272 msgid "Expected a status in the form: {\"status\": \"status\"}" msgstr "" #: glance/api/v2/image_members.py:328 msgid "An identifier for the image member (tenantId)" msgstr "" #: glance/api/v2/image_members.py:332 glance/api/v2/images.py:834 msgid "An identifier for the image" msgstr "" #: glance/api/v2/image_members.py:338 msgid "Date and time of image member creation" msgstr "" #: glance/api/v2/image_members.py:345 msgid "Date and time of last modification of image member" msgstr "" #: glance/api/v2/image_members.py:350 msgid "The status of this image member" msgstr "" #: glance/api/v2/image_tags.py:56 #, python-format msgid "Not allowed to update tags for image %s." 
msgstr "" #: glance/api/v2/image_tags.py:60 #, python-format msgid "Could not update image: %s" msgstr "" #: glance/api/v2/image_tags.py:65 #, python-format msgid "Image tag limit exceeded for image %(id)s: %(e)s:" msgstr "" #: glance/api/v2/image_tags.py:85 #, python-format msgid "Not allowed to delete tags for image %s." msgstr "" #: glance/api/v2/images.py:178 msgid "Cannot set locations to empty list." msgstr "" #: glance/api/v2/images.py:183 msgid "Owner can't be updated by non admin." msgstr "" #: glance/api/v2/images.py:191 glance/api/v2/images.py:227 #, python-format msgid "Property %s does not exist." msgstr "" #: glance/api/v2/images.py:205 #, python-format msgid "Property %s already present." msgstr "" #: glance/api/v2/images.py:222 #, python-format msgid "Property %s may not be removed." msgstr "" #: glance/api/v2/images.py:241 #, python-format msgid "Failed to find image %(image_id)s to delete" msgstr "" #: glance/api/v2/images.py:271 msgid "It's not allowed to update locations if locations are invisible." msgstr "" #: glance/api/v2/images.py:289 msgid "It's not allowed to add locations if locations are invisible." msgstr "" #: glance/api/v2/images.py:296 msgid "Invalid position for adding a location." msgstr "" #: glance/api/v2/images.py:310 msgid "It's not allowed to remove locations if locations are invisible." msgstr "" #: glance/api/v2/images.py:317 msgid "Cannot remove last location in the image." msgstr "" #: glance/api/v2/images.py:322 msgid "Invalid position for removing a location." msgstr "" #: glance/api/v2/images.py:373 glance/api/v2/images.py:487 #: glance/api/v2/metadef_namespaces.py:415 glance/api/v2/metadef_objects.py:314 #: glance/api/v2/metadef_properties.py:191 #: glance/api/v2/metadef_resource_types.py:158 #: glance/api/v2/metadef_tags.py:346 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:152 #, python-format msgid "Attribute '%s' is read-only." 
msgstr "" #: glance/api/v2/images.py:403 #, python-format msgid "" "Unable to find `op` in JSON Schema change. It must be one of the " "following: %(available)s." msgstr "" #: glance/api/v2/images.py:408 #, python-format msgid "" "Invalid operation: `%(op)s`. It must be one of the following: " "%(available)s." msgstr "" #: glance/api/v2/images.py:420 msgid "" "Operation objects must contain only one member named \"add\", \"remove\"," " or \"replace\"." msgstr "" #: glance/api/v2/images.py:425 msgid "" "Operation objects must contain exactly one member named \"add\", " "\"remove\", or \"replace\"." msgstr "" #: glance/api/v2/images.py:434 #, python-format msgid "Unable to find '%s' in JSON Schema change" msgstr "" #: glance/api/v2/images.py:462 #, python-format msgid "Pointer `%s` does not start with \"/\"." msgstr "" #: glance/api/v2/images.py:465 #, python-format msgid "Pointer `%s` contains adjacent \"/\"." msgstr "" #: glance/api/v2/images.py:468 #, python-format msgid "Pointer `%s` end with \"/\"." msgstr "" #: glance/api/v2/images.py:471 #, python-format msgid "Pointer `%s` does not contains valid token." msgstr "" #: glance/api/v2/images.py:474 #, python-format msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." msgstr "" #: glance/api/v2/images.py:480 #, python-format msgid "Operation \"%s\" requires a member named \"value\"." msgstr "" #: glance/api/v2/images.py:490 #, python-format msgid "Attribute '%s' is reserved." msgstr "" #: glance/api/v2/images.py:520 #, python-format msgid "Invalid JSON pointer for this resource: '/%s'" msgstr "" #: glance/api/v2/images.py:532 msgid "Unrecognized JSON Schema draft version" msgstr "" #: glance/api/v2/images.py:554 msgid "Request body must be a JSON array of operation objects." msgstr "" #: glance/api/v2/images.py:559 msgid "Operations must be JSON objects." 
msgstr "" #: glance/api/v2/images.py:583 glance/api/v2/metadef_namespaces.py:462 #: glance/api/v2/metadef_objects.py:301 glance/api/v2/metadef_tags.py:293 #: glance/api/v2/tasks.py:187 glance/registry/api/v1/images.py:267 msgid "limit param must be an integer" msgstr "" #: glance/api/v2/images.py:587 glance/api/v2/metadef_namespaces.py:466 #: glance/api/v2/metadef_objects.py:305 glance/api/v2/metadef_tags.py:297 #: glance/api/v2/tasks.py:191 glance/registry/api/v1/images.py:270 msgid "limit param must be positive" msgstr "" #: glance/api/v2/images.py:594 #, python-format msgid "" "Invalid sort key: %(sort_key)s. It must be one of the following: " "%(available)s." msgstr "" #: glance/api/v2/images.py:611 #, python-format msgid "Invalid status: %s" msgstr "" #: glance/api/v2/images.py:620 glance/api/v2/metadef_namespaces.py:453 #: glance/api/v2/metadef_objects.py:292 glance/api/v2/metadef_tags.py:284 #, python-format msgid "Invalid visibility value: %s" msgstr "" #: glance/api/v2/images.py:624 msgid "The \"changes-since\" filter is no longer available on v2." msgstr "" #: glance/api/v2/images.py:643 msgid "Old and new sorting syntax cannot be combined" msgstr "" #: glance/api/v2/images.py:674 msgid "Number of sort dirs does not match the number of sort keys" msgstr "" #: glance/api/v2/images.py:840 msgid "Descriptive name for the image" msgstr "" #: glance/api/v2/images.py:846 msgid "Status of the image" msgstr "" #: glance/api/v2/images.py:852 msgid "Scope of image accessibility" msgstr "" #: glance/api/v2/images.py:857 msgid "If true, image will not be deletable." msgstr "" #: glance/api/v2/images.py:862 msgid "md5 hash of image contents." 
msgstr "" #: glance/api/v2/images.py:867 msgid "Owner of the image" msgstr "" #: glance/api/v2/images.py:873 msgid "Size of image file in bytes" msgstr "" #: glance/api/v2/images.py:878 msgid "Virtual size of image in bytes" msgstr "" #: glance/api/v2/images.py:882 msgid "Format of the container" msgstr "" #: glance/api/v2/images.py:887 msgid "Format of the disk" msgstr "" #: glance/api/v2/images.py:893 msgid "Date and time of image registration" msgstr "" #: glance/api/v2/images.py:902 msgid "Date and time of the last image modification" msgstr "" #: glance/api/v2/images.py:908 msgid "List of strings related to the image" msgstr "" #: glance/api/v2/images.py:917 msgid "URL to access the image file kept in external store" msgstr "" #: glance/api/v2/images.py:922 msgid "Amount of ram (in MB) required to boot image." msgstr "" #: glance/api/v2/images.py:926 msgid "Amount of disk space (in GB) required to boot image." msgstr "" #: glance/api/v2/images.py:932 msgid "An image self url" msgstr "" #: glance/api/v2/images.py:937 msgid "An image file url" msgstr "" #: glance/api/v2/images.py:942 msgid "An image schema url" msgstr "" #: glance/api/v2/images.py:959 msgid "A set of URLs to access the image file kept in external store" msgstr "" #: glance/api/v2/metadef_namespaces.py:675 msgid "The unique namespace text." msgstr "" #: glance/api/v2/metadef_namespaces.py:680 msgid "The user friendly name for the namespace. Used by UI if available." msgstr "" #: glance/api/v2/metadef_namespaces.py:686 msgid "Provides a user friendly description of the namespace." msgstr "" #: glance/api/v2/metadef_namespaces.py:692 msgid "Scope of namespace accessibility." msgstr "" #: glance/api/v2/metadef_namespaces.py:697 msgid "If true, namespace will not be deletable." msgstr "" #: glance/api/v2/metadef_namespaces.py:701 msgid "Owner of the namespace." 
msgstr "" #: glance/api/v2/metadef_namespaces.py:707 msgid "Date and time of namespace creation" msgstr "" #: glance/api/v2/metadef_namespaces.py:713 msgid "Date and time of the last namespace modification" msgstr "" #: glance/api/v2/metadef_objects.py:197 msgid "Date and time of object creation" msgstr "" #: glance/api/v2/metadef_objects.py:203 msgid "Date and time of the last object modification" msgstr "" #: glance/api/v2/metadef_properties.py:94 #, python-format msgid "" "Property %(property_name)s does not start with the expected resource type" " association prefix of '%(prefix)s'." msgstr "" #: glance/api/v2/metadef_resource_types.py:131 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:125 #, python-format msgid "Failed to find resource type %(resourcetype)s to delete" msgstr "" #: glance/api/v2/metadef_resource_types.py:204 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:199 msgid "" "Resource type names should be aligned with Heat resource types whenever " "possible: " "http://docs.openstack.org/developer/heat/template_guide/openstack.html" msgstr "" #: glance/api/v2/metadef_resource_types.py:212 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:207 msgid "" "Specifies the prefix to use for the given resource type. Any properties " "in the namespace should be prefixed with this prefix when being applied " "to the specified resource type. Must include prefix separator (e.g. a " "colon :)." msgstr "" #: glance/api/v2/metadef_resource_types.py:221 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:216 msgid "" "Some resource types allow more than one key / value pair per instance. " "For example, Cinder allows user and image metadata on volumes. Only the " "image properties metadata is evaluated by Nova (scheduling or drivers). " "This property allows a namespace target to remove the ambiguity." 
msgstr "" #: glance/api/v2/metadef_resource_types.py:233 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:228 msgid "Date and time of resource type association" msgstr "" #: glance/api/v2/metadef_resource_types.py:239 #: glance/tests/functional/v2/test_metadef_resourcetypes.py:234 msgid "Date and time of the last resource type association modification" msgstr "" #: glance/api/v2/metadef_tags.py:198 msgid "Date and time of tag creation" msgstr "" #: glance/api/v2/metadef_tags.py:204 msgid "Date and time of the last tag modification" msgstr "" #: glance/api/v2/tasks.py:137 msgid "" "This operation is currently not permitted on Glance Tasks. They are auto " "deleted after reaching the time based on their expires_at property." msgstr "" #: glance/api/v2/tasks.py:166 #, python-format msgid "Invalid status value: %s" msgstr "" #: glance/api/v2/tasks.py:172 #, python-format msgid "Invalid type value: %s" msgstr "" #: glance/api/v2/tasks.py:179 glance/registry/api/v1/images.py:279 msgid "Invalid marker format" msgstr "" #: glance/api/v2/tasks.py:200 #, python-format msgid "Task '%s' is required" msgstr "" #: glance/api/v2/tasks.py:325 msgid "An identifier for the task" msgstr "" #: glance/api/v2/tasks.py:326 msgid "" "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0" "-9a-fA-F]){12}$" msgstr "" #: glance/api/v2/tasks.py:331 msgid "The type of task represented by this content" msgstr "" #: glance/api/v2/tasks.py:338 msgid "The current status of this task" msgstr "" #: glance/api/v2/tasks.py:348 msgid "The parameters required by task, JSON blob" msgstr "" #: glance/api/v2/tasks.py:352 msgid "The result of current task, JSON blob" msgstr "" #: glance/api/v2/tasks.py:356 msgid "An identifier for the owner of this task" msgstr "" #: glance/api/v2/tasks.py:360 msgid "" "Human-readable informative message only included when appropriate " "(usually on failure)" msgstr "" #: glance/api/v2/tasks.py:365 msgid "Datetime when this resource would be 
subject to removal" msgstr "" #: glance/api/v2/tasks.py:370 msgid "Datetime when this resource was created" msgstr "" #: glance/api/v2/tasks.py:374 msgid "Datetime when this resource was updated" msgstr "" #: glance/async/taskflow_executor.py:40 msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." msgstr "" #: glance/async/taskflow_executor.py:44 msgid "" "The number of parallel activities executed at the same time by the " "engine. The value can be greater than one when the engine mode is " "'parallel'." msgstr "" #: glance/async/taskflow_executor.py:144 msgid "Task failed due to Internal Error" msgstr "" #: glance/async/flows/base_import.py:95 #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Missing work dir: " "%(work_dir)s" msgstr "" #: glance/async/flows/base_import.py:127 #, python-format msgid "" "%(task_id)s of %(task_type)s not configured properly. Could not load the " "filesystem store" msgstr "" #: glance/async/flows/base_import.py:170 #, python-format msgid "File %(path)s has invalid backing file %(bfile)s, aborting." msgstr "" #: glance/async/flows/convert.py:31 msgid "" "The format to which images will be automatically converted. When using " "the RBD backend, this should be set to 'raw'" msgstr "" #: glance/async/flows/ovf_process.py:111 msgid "OVA extract is limited to admin" msgstr "" #: glance/async/flows/ovf_process.py:151 msgid "Could not find OVF file in OVA archive file." msgstr "" #: glance/async/flows/ovf_process.py:202 msgid "Currently, OVA packages containing multiple disk are not supported." msgstr "" #: glance/async/flows/ovf_process.py:243 msgid "" "OVF metadata of interest was not specified in ovf-metadata.json config " "file. Please set \"cim_pasd\" to a list of interested " "CIM_ProcessorAllocationSettingData properties." msgstr "" #: glance/async/flows/ovf_process.py:249 msgid "OVF properties config file \"ovf-metadata.json\" was not found." 
msgstr "" #: glance/cmd/__init__.py:41 #, python-format msgid "" "It appears that the eventlet module has been imported prior to setting " "%s='yes'. It is currently necessary to disable eventlet.greendns if using" " ipv6 since eventlet.greendns currently breaks with ipv6 addresses. " "Please ensure that eventlet is not imported prior to this being set." msgstr "" #: glance/cmd/control.py:104 #, python-format msgid "%(serv)s appears to already be running: %(pid)s" msgstr "" #: glance/cmd/control.py:108 #, python-format msgid "Removing stale pid file %s" msgstr "" #: glance/cmd/control.py:117 msgid "Unable to increase file descriptor limit. Running as non-root?" msgstr "" #: glance/cmd/control.py:164 #, python-format msgid "%(verb)sing %(serv)s with %(conf)s" msgstr "" #: glance/cmd/control.py:167 #, python-format msgid "%(verb)sing %(serv)s" msgstr "" #: glance/cmd/control.py:179 #, python-format msgid "unable to launch %(serv)s. Got error: %(e)s" msgstr "" #: glance/cmd/control.py:210 #, python-format msgid "%(serv)s (pid %(pid)s) is running..." msgstr "" #: glance/cmd/control.py:213 #, python-format msgid "%s is stopped" msgstr "" #: glance/cmd/control.py:229 #, python-format msgid "" "Unable to create pid file %(pid)s. 
Running as non-root?\n" "Falling back to a temp file, you can stop %(service)s service using:\n" " %(file)s %(server)s stop --pid-file %(fb)s" msgstr "" #: glance/cmd/control.py:246 #, python-format msgid "Reload of %(serv)s not supported" msgstr "" #: glance/cmd/control.py:254 #, python-format msgid "Server %(serv)s is stopped" msgstr "" #: glance/cmd/control.py:259 #, python-format msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "" #: glance/cmd/control.py:263 glance/cmd/control.py:285 #, python-format msgid "Process %d not running" msgstr "" #: glance/cmd/control.py:281 #, python-format msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" msgstr "" #: glance/cmd/control.py:292 #, python-format msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" msgstr "" #: glance/cmd/control.py:295 #, python-format msgid "%s is already stopped" msgstr "" #: glance/cmd/control.py:372 #, python-format msgid "Suppressed respawn as %(serv)s was %(rsn)s." msgstr "" #: glance/cmd/manage.py:163 msgid "Must supply a positive, non-zero value for age." msgstr "" #: glance/cmd/manage.py:165 msgid "Maximal age is count of days since epoch." msgstr "" #: glance/cmd/manage.py:167 msgid "Minimal rows limit is 1." msgstr "" #: glance/cmd/manage.py:320 msgid "Available categories:" msgstr "" #: glance/cmd/manage.py:322 #, python-format msgid "\t%s" msgstr "" #: glance/cmd/replicator.py:106 #, python-format msgid "" "The image %s is already present on the slave, but our check for it did " "not find it. This indicates that we do not have permissions to see all " "the images on the slave server." msgstr "" #: glance/cmd/replicator.py:326 glance/cmd/replicator.py:358 #: glance/cmd/replicator.py:434 glance/cmd/replicator.py:509 #: glance/cmd/replicator.py:579 msgid "Too few arguments." 
msgstr "" #: glance/cmd/replicator.py:342 #, python-format msgid "Total size is %(size)d bytes across %(img_count)d images" msgstr "" #: glance/cmd/replicator.py:694 #, python-format msgid "Unknown command: %s" msgstr "" #: glance/common/auth.py:190 glance/common/auth.py:229 #, python-format msgid "Unexpected response: %s" msgstr "" #: glance/common/auth.py:257 #, python-format msgid "Unknown auth strategy '%s'" msgstr "" #: glance/common/client.py:243 msgid "" "You have selected to use SSL in connecting, and you have supplied a cert," " however you have failed to supply either a key_file parameter or set the" " GLANCE_CLIENT_KEY_FILE environ variable" msgstr "" #: glance/common/client.py:251 msgid "" "You have selected to use SSL in connecting, and you have supplied a key, " "however you have failed to supply either a cert_file parameter or set the" " GLANCE_CLIENT_CERT_FILE environ variable" msgstr "" #: glance/common/client.py:260 #, python-format msgid "The key file you specified %s does not exist" msgstr "" #: glance/common/client.py:267 #, python-format msgid "The cert file you specified %s does not exist" msgstr "" #: glance/common/client.py:274 #, python-format msgid "The CA file you specified %s does not exist" msgstr "" #: glance/common/client.py:397 #, python-format msgid "Constructed URL: %s" msgstr "" #: glance/common/config.py:35 msgid "" "Partial name of a pipeline in your paste configuration file with the " "service name removed. For example, if your paste section name is " "[pipeline:glance-api-keystone] use the value \"keystone\"" msgstr "" #: glance/common/config.py:41 msgid "Name of the paste configuration file." 
msgstr "" #: glance/common/config.py:46 msgid "Supported values for the 'container_format' image attribute" msgstr "" #: glance/common/config.py:53 msgid "Supported values for the 'disk_format' image attribute" msgstr "" #: glance/common/config.py:61 msgid "Time in hours for which a task lives after, either succeeding or failing" msgstr "" #: glance/common/config.py:67 msgid "Specifies which task executor to be used to run the task scripts." msgstr "" #: glance/common/config.py:70 msgid "" "Work dir for asynchronous task operations. The directory set here will be" " used to operate over images - normally before they are imported in the " "destination store. When providing work dir, make sure enough space is " "provided for concurrent tasks to run efficiently without running out of " "space. A rough estimation can be done by multiplying the number of " "`max_workers` - or the N of workers running - by an average image size " "(e.g 500MB). The image size estimation should be done based on the " "average size in your deployment. Note that depending on the tasks running" " you may need to multiply this number by some factor depending on what " "the task does. For example, you may want to double the available size if " "image conversion is enabled. All this being said, remember these are just" " estimations and you should do them based on the worst case scenario and " "be prepared to act in case they were wrong." msgstr "" #: glance/common/config.py:91 msgid "" "Whether to allow users to specify image properties beyond what the image " "schema provides" msgstr "" #: glance/common/config.py:94 msgid "" "Maximum number of image members per image. Negative values evaluate to " "unlimited." msgstr "" #: glance/common/config.py:97 msgid "" "Maximum number of properties allowed on an image. Negative values " "evaluate to unlimited." msgstr "" #: glance/common/config.py:100 msgid "" "Maximum number of tags allowed on an image. Negative values evaluate to " "unlimited." 
msgstr "" #: glance/common/config.py:103 msgid "" "Maximum number of locations allowed on an image. Negative values evaluate" " to unlimited." msgstr "" #: glance/common/config.py:106 msgid "Python module path of data access API" msgstr "" #: glance/common/config.py:108 msgid "" "Default value for the number of items returned by a request if not " "specified explicitly in the request" msgstr "" #: glance/common/config.py:111 msgid "Maximum permissible number of items that could be returned by a request" msgstr "" #: glance/common/config.py:114 msgid "" "Whether to include the backend image storage location in image " "properties. Revealing storage location can be a security risk, so use " "this setting with caution!" msgstr "" #: glance/common/config.py:119 msgid "" "Whether to include the backend image locations in image properties. For " "example, if using the file system store a URL of " "\"file:///path/to/image\" will be returned to the user in the " "'direct_url' meta-data field. Revealing storage location can be a " "security risk, so use this setting with caution! Setting this to true " "overrides the show_image_direct_url option." msgstr "" #: glance/common/config.py:131 msgid "" "Maximum size of image a user can upload in bytes. Defaults to " "1099511627776 bytes (1 TB).WARNING: this value should only be increased " "after careful consideration and must be set to a value under 8 EB " "(9223372036854775808)." msgstr "" #: glance/common/config.py:137 msgid "" "Set a system wide quota for every user. This value is the total capacity " "that a user can use across all storage systems. A value of 0 means " "unlimited.Optional unit can be specified for the value. Accepted units " "are B, KB, MB, GB and TB representing Bytes, KiloBytes, MegaBytes, " "GigaBytes and TeraBytes respectively. If no unit is specified then Bytes " "is assumed. Note that there should not be any space between value and " "unit and units are case sensitive." 
msgstr "" #: glance/common/config.py:147 msgid "Deploy the v1 OpenStack Images API." msgstr "" #: glance/common/config.py:149 msgid "Deploy the v2 OpenStack Images API." msgstr "" #: glance/common/config.py:151 msgid "Deploy the v1 OpenStack Registry API." msgstr "" #: glance/common/config.py:153 msgid "Deploy the v2 OpenStack Registry API." msgstr "" #: glance/common/config.py:155 msgid "The hostname/IP of the pydev process listening for debug connections" msgstr "" #: glance/common/config.py:158 msgid "The port on which a pydev process is listening for connections." msgstr "" #: glance/common/config.py:161 msgid "" "AES key for encrypting store 'location' metadata. This includes, if used," " Swift or S3 credentials. Should be set to a random string of length 16, " "24 or 32 bytes" msgstr "" #: glance/common/config.py:166 msgid "" "Digest algorithm which will be used for digital signature. Use the " "command \"openssl list-message-digest-algorithms\" to get the available " "algorithms supported by the version of OpenSSL on the platform. Examples " "are \"sha1\", \"sha256\", \"sha512\", etc." msgstr "" #: glance/common/config.py:228 #, python-format msgid "Unable to locate paste config file for %s." msgstr "" #: glance/common/config.py:267 #, python-format msgid "" "Unable to load %(app_name)s from configuration file %(conf_file)s.\n" "Got: %(e)r" msgstr "" #: glance/common/exception.py:40 msgid "An unknown exception occurred" msgstr "" #: glance/common/exception.py:65 #, python-format msgid "Missing required credential: %(required)s" msgstr "" #: glance/common/exception.py:69 #, python-format msgid "" "Incorrect auth strategy, expected \"%(expected)s\" but received " "\"%(received)s\"" msgstr "" #: glance/common/exception.py:74 msgid "An object with the specified identifier was not found." msgstr "" #: glance/common/exception.py:78 msgid "The Store URI was malformed." 
msgstr "" #: glance/common/exception.py:82 msgid "An object with the same identifier already exists." msgstr "" #: glance/common/exception.py:86 msgid "An object with the same identifier is currently being operated on." msgstr "" #: glance/common/exception.py:91 #, python-format msgid "" "The size of the data %(image_size)s will exceed the limit. %(remaining)s " "bytes remaining." msgstr "" #: glance/common/exception.py:96 #, python-format msgid "Connect error/bad request to Auth service at URL %(url)s." msgstr "" #: glance/common/exception.py:100 #, python-format msgid "Auth service at URL %(url)s not found." msgstr "" #: glance/common/exception.py:104 msgid "Authorization failed." msgstr "" #: glance/common/exception.py:108 msgid "You are not authenticated." msgstr "" #: glance/common/exception.py:112 #, python-format msgid "Image upload problem: %s" msgstr "" #: glance/common/exception.py:116 glance/common/exception.py:120 #: glance/common/exception.py:404 msgid "You are not authorized to complete this action." msgstr "" #: glance/common/exception.py:124 #, python-format msgid "Image %(image_id)s is protected and cannot be deleted." msgstr "" #: glance/common/exception.py:128 #, python-format msgid "" "Metadata definition namespace %(namespace)s is protected and cannot be " "deleted." msgstr "" #: glance/common/exception.py:133 #, python-format msgid "" "Metadata definition property %(property_name)s is protected and cannot be" " deleted." msgstr "" #: glance/common/exception.py:138 #, python-format msgid "" "Metadata definition object %(object_name)s is protected and cannot be " "deleted." msgstr "" #: glance/common/exception.py:143 #, python-format msgid "" "Metadata definition resource-type-association %(resource_type)s is " "protected and cannot be deleted." msgstr "" #: glance/common/exception.py:148 #, python-format msgid "" "Metadata definition resource-type %(resource_type_name)s is a seeded-" "system type and cannot be deleted." 
msgstr "" #: glance/common/exception.py:153 #, python-format msgid "Metadata definition tag %(tag_name)s is protected and cannot be deleted." msgstr "" #: glance/common/exception.py:158 msgid "Data supplied was not valid." msgstr "" #: glance/common/exception.py:162 msgid "Sort key supplied was not valid." msgstr "" #: glance/common/exception.py:166 msgid "Sort direction supplied was not valid." msgstr "" #: glance/common/exception.py:170 msgid "Invalid configuration in property protection file." msgstr "" #: glance/common/exception.py:174 msgid "Invalid configuration in glance-swift conf file." msgstr "" #: glance/common/exception.py:178 msgid "Unable to filter using the specified operator." msgstr "" #: glance/common/exception.py:182 msgid "Unable to filter using the specified range." msgstr "" #: glance/common/exception.py:186 #, python-format msgid "Invalid value for option %(option)s: %(value)s" msgstr "" #: glance/common/exception.py:190 #, python-format msgid "Attribute '%(property)s' is read-only." msgstr "" #: glance/common/exception.py:194 #, python-format msgid "Attribute '%(property)s' is reserved." msgstr "" #: glance/common/exception.py:198 #, python-format msgid "Redirecting to %(uri)s for authorization." msgstr "" #: glance/common/exception.py:202 msgid "There was an error connecting to a server" msgstr "" #: glance/common/exception.py:206 msgid "There was an error configuring the client." msgstr "" #: glance/common/exception.py:210 #, python-format msgid "" "The request returned a 302 Multiple Choices. This generally means that " "you have not included a version indicator in a request URI.\n" "\n" "The body of response returned:\n" "%(body)s" msgstr "" #: glance/common/exception.py:216 #, python-format msgid "" "The request returned a 413 Request Entity Too Large. 
This generally means" " that rate limiting or a quota threshold was breached.\n" "\n" "The response body:\n" "%(body)s" msgstr "" #: glance/common/exception.py:227 msgid "" "The request returned 503 Service Unavailable. This generally occurs on " "service overload or other transient outage." msgstr "" #: glance/common/exception.py:238 msgid "The request returned 500 Internal Server Error." msgstr "" #: glance/common/exception.py:242 #, python-format msgid "" "The request returned an unexpected status: %(status)s.\n" "\n" "The response body:\n" "%(body)s" msgstr "" #: glance/common/exception.py:247 #, python-format msgid "Invalid content type %(content_type)s" msgstr "" #: glance/common/exception.py:251 #, python-format msgid "Registry was not configured correctly on API server. Reason: %(reason)s" msgstr "" #: glance/common/exception.py:256 #, python-format msgid "" "Driver %(driver_name)s could not be configured correctly. Reason: " "%(reason)s" msgstr "" #: glance/common/exception.py:261 #, python-format msgid "Maximum redirects (%(redirects)s) was exceeded." msgstr "" #: glance/common/exception.py:265 msgid "Received invalid HTTP redirect." msgstr "" #: glance/common/exception.py:269 msgid "Response from Keystone does not contain a Glance endpoint." msgstr "" #: glance/common/exception.py:273 #, python-format msgid "" "Multiple 'image' service matches for region %(region)s. This generally " "means that a region is required and you have not supplied one." msgstr "" #: glance/common/exception.py:279 #, python-format msgid "Server worker creation failed: %(reason)s." msgstr "" #: glance/common/exception.py:283 #, python-format msgid "Unable to load schema: %(reason)s" msgstr "" #: glance/common/exception.py:287 #, python-format msgid "Provided object does not match schema '%(schema)s': %(reason)s" msgstr "" #: glance/common/exception.py:292 msgid "The provided image is too large." 
msgstr "" #: glance/common/exception.py:302 #, python-format msgid "" "The limit has been exceeded on the number of allowed image properties. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" #: glance/common/exception.py:307 #, python-format msgid "" "The limit has been exceeded on the number of allowed image tags. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" #: glance/common/exception.py:312 #, python-format msgid "" "The limit has been exceeded on the number of allowed image locations. " "Attempted: %(attempted)s, Maximum: %(maximum)s" msgstr "" #: glance/common/exception.py:317 msgid "System SIGHUP signal received." msgstr "" #: glance/common/exception.py:321 #, python-format msgid "%(cls)s exception was raised in the last rpc call: %(val)s" msgstr "" #: glance/common/exception.py:325 msgid "An unknown task exception occurred" msgstr "" #: glance/common/exception.py:329 msgid "Task was not configured properly" msgstr "" #: glance/common/exception.py:333 #, python-format msgid "Image with the given id %(image_id)s was not found" msgstr "" #: glance/common/exception.py:337 #, python-format msgid "Task with the given id %(task_id)s was not found" msgstr "" #: glance/common/exception.py:341 #, python-format msgid "Provided status of task is unsupported: %(status)s" msgstr "" #: glance/common/exception.py:345 #, python-format msgid "Provided type of task is unsupported: %(type)s" msgstr "" #: glance/common/exception.py:349 #, python-format msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" msgstr "" #: glance/common/exception.py:354 msgid "An import task exception occurred" msgstr "" #: glance/common/exception.py:358 #, python-format msgid "The location %(location)s already exists" msgstr "" #: glance/common/exception.py:362 #, python-format msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" msgstr "" #: glance/common/exception.py:367 #, python-format msgid "" "Image status transition 
from %(cur_status)s to %(new_status)s is not " "allowed" msgstr "" #: glance/common/exception.py:372 #, python-format msgid "The metadata definition namespace=%(namespace_name)s already exists." msgstr "" #: glance/common/exception.py:377 #, python-format msgid "" "A metadata definition object with name=%(object_name)s already exists in " "namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:382 #, python-format msgid "" "A metadata definition property with name=%(property_name)s already exists" " in namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:387 #, python-format msgid "" "A metadata definition resource-type with name=%(resource_type_name)s " "already exists." msgstr "" #: glance/common/exception.py:392 #, python-format msgid "" "The metadata definition resource-type association of resource-" "type=%(resource_type_name)s to namespace=%(namespace_name)s already " "exists." msgstr "" #: glance/common/exception.py:399 #, python-format msgid "" "A metadata tag with name=%(name)s already exists in " "namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:408 #, python-format msgid "" "The metadata definition %(record_type)s with name=%(record_name)s not " "deleted. Other records still refer to it." msgstr "" #: glance/common/exception.py:414 #, python-format msgid "Metadata definition namespace=%(namespace_name)swas not found." msgstr "" #: glance/common/exception.py:419 #, python-format msgid "" "The metadata definition object with name=%(object_name)s was not found in" " namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:425 #, python-format msgid "" "The metadata definition property with name=%(property_name)s was not " "found in namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:431 #, python-format msgid "" "The metadata definition resource-type with name=%(resource_type_name)s, " "was not found." 
msgstr "" #: glance/common/exception.py:436 #, python-format msgid "" "The metadata definition resource-type association of resource-" "type=%(resource_type_name)s to namespace=%(namespace_name)s, was not " "found." msgstr "" #: glance/common/exception.py:443 #, python-format msgid "" "The metadata definition tag with name=%(name)s was not found in " "namespace=%(namespace_name)s." msgstr "" #: glance/common/exception.py:449 #, python-format msgid "Unable to verify signature: %(reason)s" msgstr "" #: glance/common/exception.py:453 #, python-format msgid "Version is invalid: %(reason)s" msgstr "" #: glance/common/exception.py:457 msgid "Invalid property definition" msgstr "" #: glance/common/exception.py:461 msgid "Invalid type definition" msgstr "" #: glance/common/exception.py:465 #, python-format msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" msgstr "" #: glance/common/exception.py:475 #, python-format msgid "Artifact with id=%(id)s was not found" msgstr "" #: glance/common/exception.py:479 #, python-format msgid "Artifact with id=%(id)s is not accessible" msgstr "" #: glance/common/exception.py:483 msgid "Artifact with the specified type, name and version already exists" msgstr "" #: glance/common/exception.py:488 #, python-format msgid "Artifact cannot change state from %(source)s to %(target)s" msgstr "" #: glance/common/exception.py:492 #, python-format msgid "" "Artifact with the specified type, name and version already has the direct" " dependency=%(dep)s" msgstr "" #: glance/common/exception.py:497 #, python-format msgid "" "Artifact with the specified type, name and version already has the " "transitive dependency=%(dep)s" msgstr "" #: glance/common/exception.py:502 msgid "Artifact with a circular dependency can not be created" msgstr "" #: glance/common/exception.py:506 #, python-format msgid "Operator %(op)s is not supported" msgstr "" #: glance/common/exception.py:510 #, python-format msgid "Show level %(shl)s is not supported in 
this operation" msgstr "" #: glance/common/exception.py:514 #, python-format msgid "Property's %(prop)s value has not been found" msgstr "" #: glance/common/exception.py:518 #, python-format msgid "Artifact has no property %(prop)s" msgstr "" #: glance/common/exception.py:522 #, python-format msgid "Cannot use this parameter with the operator %(op)s" msgstr "" #: glance/common/exception.py:526 #, python-format msgid "Cannot load artifact '%(name)s'" msgstr "" #: glance/common/exception.py:530 #, python-format msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" msgstr "" #: glance/common/exception.py:535 #, python-format msgid "No plugin for '%(name)s' has been loaded" msgstr "" #: glance/common/exception.py:539 #, python-format msgid "Artifact type with name '%(name)s' and version '%(version)s' is not known" msgstr "" #: glance/common/exception.py:544 #, python-format msgid "Artifact state cannot be changed from %(curr)s to %(to)s" msgstr "" #: glance/common/exception.py:548 msgid "Invalid jsonpatch request" msgstr "" #: glance/common/exception.py:552 #, python-format msgid "The provided body %(body)s is invalid under given schema: %(schema)s" msgstr "" #: glance/common/exception.py:557 #, python-format msgid "The provided path '%(path)s' is invalid: %(explanation)s" msgstr "" #: glance/common/jsonpatchvalidator.py:103 msgid "" "Json path should start with a '/', end with no '/', no 2 subsequent '/' " "are allowed." msgstr "" #: glance/common/jsonpatchvalidator.py:107 msgid "" "Pointer contains '~' which is not part of a recognized escape sequence " "[~0, ~1]." msgstr "" #: glance/common/property_utils.py:31 msgid "" "The location of the property protection file.This file contains the rules" " for property protections and the roles/policies associated with it. If " "this config value is not specified, by default, property protections " "won't be enforced. 
If a value is specified and the file is not found, " "then the glance-api service will not start." msgstr "" #: glance/common/property_utils.py:41 msgid "" "This config value indicates whether \"roles\" or \"policies\" are used in" " the property protection file." msgstr "" #: glance/common/property_utils.py:128 #, python-format msgid "" "Property protection on operation %(operation)s for rule %(rule)s is not " "found. No role will be allowed to perform this operation." msgstr "" #: glance/common/rpc.py:143 msgid "Request must be a list of commands" msgstr "" #: glance/common/rpc.py:148 #, python-format msgid "Bad Command: %s" msgstr "" #: glance/common/rpc.py:155 #, python-format msgid "Wrong command structure: %s" msgstr "" #: glance/common/rpc.py:164 msgid "Command not found" msgstr "" #: glance/common/semver_db.py:77 #, python-format msgid "Version component is too large (%d max)" msgstr "" #: glance/common/semver_db.py:125 #, python-format msgid "Prerelease numeric component is too large (%d characters max)" msgstr "" #: glance/common/signature_utils.py:138 #, python-format msgid "Invalid signature key type: %s" msgstr "" #: glance/common/signature_utils.py:161 #, python-format msgid "Invalid mask_gen_algorithm: %s" msgstr "" #: glance/common/signature_utils.py:174 #, python-format msgid "Invalid pss_salt_length: %s" msgstr "" #: glance/common/signature_utils.py:261 glance/common/signature_utils.py:335 msgid "" "Required image properties for signature verification do not exist. Cannot" " verify signature." msgstr "" #: glance/common/signature_utils.py:285 glance/common/signature_utils.py:361 msgid "" "Unable to verify signature since the algorithm is unsupported on this " "system" msgstr "" #: glance/common/signature_utils.py:294 msgid "Error occurred while creating the verifier" msgstr "" #: glance/common/signature_utils.py:373 msgid "Signature verification failed." 
msgstr "" #: glance/common/signature_utils.py:378 msgid "Error occurred while verifying the signature" msgstr "" #: glance/common/signature_utils.py:394 msgid "The signature data was not properly encoded using base64" msgstr "" #: glance/common/signature_utils.py:410 #, python-format msgid "Invalid signature hash method: %s" msgstr "" #: glance/common/signature_utils.py:436 #, python-format msgid "Invalid public key type for signature key type: %s" msgstr "" #: glance/common/signature_utils.py:467 #, python-format msgid "Unable to retrieve certificate with ID: %s" msgstr "" #: glance/common/signature_utils.py:473 #, python-format msgid "Invalid certificate format: %s" msgstr "" #: glance/common/signature_utils.py:483 #, python-format msgid "Certificate format not supported: %s" msgstr "" #: glance/common/signature_utils.py:505 #, python-format msgid "Certificate is not valid before: %s UTC" msgstr "" #: glance/common/signature_utils.py:510 #, python-format msgid "Certificate is not valid after: %s UTC" msgstr "" #: glance/common/swift_store_utils.py:25 msgid "" "The reference to the default swift account/backing store parameters to " "use for adding new images." msgstr "" #: glance/common/swift_store_utils.py:28 msgid "" "The address where the Swift authentication service is " "listening.(deprecated)" msgstr "" #: glance/common/swift_store_utils.py:31 msgid "" "The user to authenticate against the Swift authentication service " "(deprecated)" msgstr "" #: glance/common/swift_store_utils.py:34 msgid "" "Auth key for the user authenticating against the Swift authentication " "service. (deprecated)" msgstr "" #: glance/common/swift_store_utils.py:37 msgid "The config file that has the swift account(s)configs." msgstr "" #: glance/common/utils.py:292 #, python-format msgid "Bad header: %(header_name)s" msgstr "" #: glance/common/utils.py:306 #, python-format msgid "Cannot convert image %(key)s '%(value)s' to an integer." 
msgstr "" #: glance/common/utils.py:313 msgid "Cannot be a negative value." msgstr "" #: glance/common/utils.py:402 #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. " "Please verify it. Error: %(ioe)s" msgstr "" #: glance/common/utils.py:409 #, python-format msgid "" "There is a problem with your %(error_key_name)s %(error_filename)s. " "Please verify it. OpenSSL error: %(ce)s" msgstr "" #: glance/common/utils.py:434 #, python-format msgid "" "There is a problem with your key pair. Please verify that cert " "%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" msgstr "" #: glance/common/utils.py:480 #, python-format msgid "Host and port \"%s\" is not valid." msgstr "" #: glance/common/utils.py:483 #, python-format msgid "Port \"%s\" is not valid." msgstr "" #: glance/common/utils.py:492 #, python-format msgid "Host \"%s\" is not valid." msgstr "" #: glance/common/utils.py:495 #, python-format msgid "" "%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " "address, hostname, or FQDN. If using an IPv6 address, enclose it in " "brackets separately from the port (i.e., \"[fe80::a:b:c]:9876\")." msgstr "" #: glance/common/utils.py:530 msgid "Property names can't contain 4 byte unicode." msgstr "" #: glance/common/utils.py:533 #, python-format msgid "%s can't contain 4 byte unicode characters." msgstr "" #: glance/common/utils.py:542 msgid "Param values can't contain 4 byte unicode." msgstr "" #: glance/common/utils.py:562 #, python-format msgid "Value %(value)d out of range, must not exceed %(max)d" msgstr "" #: glance/common/utils.py:570 #, python-format msgid "'%(param)s' value out of range, must not exceed %(max)d" msgstr "" #: glance/common/utils.py:627 #, python-format msgid "Invalid filter value %s. There is no comma before opening quotation mark." msgstr "" #: glance/common/utils.py:632 #, python-format msgid "Invalid filter value %s. 
There is no comma after closing quotation mark." msgstr "" #: glance/common/utils.py:637 #, python-format msgid "Invalid filter value %s. The quote is not closed." msgstr "" #: glance/common/utils.py:687 msgid "Unable to filter on a unknown operator." msgstr "" #: glance/common/wsgi.py:59 msgid "" "Address to bind the server. Useful when selecting a particular network " "interface." msgstr "" #: glance/common/wsgi.py:62 msgid "The port on which the server will listen." msgstr "" #: glance/common/wsgi.py:67 msgid "The backlog value that will be used when creating the TCP listener socket." msgstr "" #: glance/common/wsgi.py:70 msgid "" "The value for the socket option TCP_KEEPIDLE. This is the time in " "seconds that the connection must be idle before TCP starts sending " "keepalive probes." msgstr "" #: glance/common/wsgi.py:73 msgid "CA certificate file to use to verify connecting clients." msgstr "" #: glance/common/wsgi.py:75 msgid "Certificate file to use when starting API server securely." msgstr "" #: glance/common/wsgi.py:77 msgid "Private key file to use when starting API server securely." msgstr "" #: glance/common/wsgi.py:83 msgid "" "The number of child process workers that will be created to service " "requests. The default will be equal to the number of CPUs available." msgstr "" #: glance/common/wsgi.py:87 msgid "" "Maximum line size of message headers to be accepted. max_header_line may " "need to be increased when using large tokens (typically those generated " "by the Keystone v3 API with big service catalogs" msgstr "" #: glance/common/wsgi.py:92 msgid "" "If False, server will return the header \"Connection: close\", If True, " "server will return \"Connection: Keep-Alive\" in its responses. In order " "to close the client socket connection explicitly after the response is " "sent and read successfully by the client, you simply have to set this " "option to False when you create a wsgi server." 
msgstr "" #: glance/common/wsgi.py:101 msgid "" "Timeout for client connections' socket operations. If an incoming " "connection is idle for this number of seconds it will be closed. A value " "of '0' means wait forever." msgstr "" #: glance/common/wsgi.py:109 msgid "If False fully disable profiling feature." msgstr "" #: glance/common/wsgi.py:111 msgid "If False doesn't trace SQL requests." msgstr "" #: glance/common/wsgi.py:113 msgid "" "Secret key to use to sign Glance API and Glance Registry services tracing" " messages." msgstr "" #: glance/common/wsgi.py:192 msgid "" "When running server in SSL mode, you must specify both a cert_file and " "key_file option value in your configuration file" msgstr "" #: glance/common/wsgi.py:209 #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" msgstr "" #: glance/common/wsgi.py:224 msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" msgstr "" #: glance/common/wsgi.py:770 #, python-format msgid "Malformed Content-Range header: %s" msgstr "" #: glance/common/wsgi.py:803 msgid "Unexpected body type. Expected list/dict." msgstr "" #: glance/common/wsgi.py:807 msgid "Malformed JSON in request body." msgstr "" #: glance/common/wsgi.py:896 msgid "A body is not expected with this request." msgstr "" #: glance/common/wsgi.py:908 msgid "" "Error decoding your request. 
Either the URL or the request body contained" " characters that could not be decoded by Glance" msgstr "" #: glance/common/glare/declarative.py:59 msgid "Not a valid value type" msgstr "" #: glance/common/glare/declarative.py:92 msgid "Default value is invalid" msgstr "" #: glance/common/glare/declarative.py:106 #: glance/common/glare/declarative.py:712 msgid "Value is required" msgstr "" #: glance/common/glare/declarative.py:129 msgid "Invalid item type specification" msgstr "" #: glance/common/glare/declarative.py:132 msgid "List definitions may hot have defaults" msgstr "" #: glance/common/glare/declarative.py:147 msgid "Cannot specify 'min_size' explicitly" msgstr "" #: glance/common/glare/declarative.py:152 msgid "Cannot specify 'max_size' explicitly" msgstr "" #: glance/common/glare/declarative.py:177 msgid "List size is less than minimum" msgstr "" #: glance/common/glare/declarative.py:186 msgid "List size is greater than maximum" msgstr "" #: glance/common/glare/declarative.py:201 msgid "Items have to be unique" msgstr "" #: glance/common/glare/declarative.py:249 msgid "Invalid dict property type specification" msgstr "" #: glance/common/glare/declarative.py:255 msgid "Invalid dict property type" msgstr "" #: glance/common/glare/declarative.py:269 msgid "Dictionary contains unexpected key(s)" msgstr "" #: glance/common/glare/declarative.py:285 msgid "Dictionary size is less than minimum" msgstr "" #: glance/common/glare/declarative.py:295 msgid "Dictionary size is greater than maximum" msgstr "" #: glance/common/glare/declarative.py:362 msgid "Custom validators list should contain tuples '(function, message)'" msgstr "" #: glance/common/glare/declarative.py:379 #, python-format msgid "Allowed values %s are invalid under given validators" msgstr "" #: glance/common/glare/declarative.py:386 msgid "Is not allowed value" msgstr "" #: glance/common/glare/declarative.py:404 msgid "Dependency relations cannot be mutable" msgstr "" #: 
glance/common/glare/declarative.py:463 msgid "Attempt to set readonly property" msgstr "" #: glance/common/glare/declarative.py:471 msgid "Attempt to set value of immutable property" msgstr "" #: glance/common/glare/declarative.py:557 msgid "Type version has to be a valid semver string" msgstr "" #: glance/common/glare/declarative.py:565 #, python-format msgid "%(attribute)s is required" msgstr "" #: glance/common/glare/declarative.py:570 #, python-format msgid "%(attribute)s have to be string" msgstr "" #: glance/common/glare/declarative.py:573 #, python-format msgid "%(attribute)s may not be longer than %(length)i" msgstr "" #: glance/common/glare/declarative.py:577 #, python-format msgid "%(attribute)s may not be shorter than %(length)i" msgstr "" #: glance/common/glare/declarative.py:581 #, python-format msgid "%(attribute)s should match pattern %(pattern)s" msgstr "" #: glance/common/glare/declarative.py:625 msgid "Cannot declare artifact property with reserved name 'metadata'" msgstr "" #: glance/common/glare/declarative.py:654 msgid "Unable to modify collection in immutable or readonly property" msgstr "" #: glance/common/glare/definitions.py:71 msgid "Max string length may not exceed 255 characters" msgstr "" #: glance/common/glare/definitions.py:74 msgid "Length is greater than maximum" msgstr "" #: glance/common/glare/definitions.py:85 msgid "Min string length may not be negative" msgstr "" #: glance/common/glare/definitions.py:89 msgid "Length is less than minimum" msgstr "" #: glance/common/glare/definitions.py:101 msgid "Does not match pattern" msgstr "" #: glance/common/glare/definitions.py:158 #: glance/common/glare/definitions.py:210 #: glance/common/glare/definitions.py:260 msgid "Value is less than minimum" msgstr "" #: glance/common/glare/definitions.py:169 #: glance/common/glare/definitions.py:221 #: glance/common/glare/definitions.py:271 msgid "Value is greater than maximum" msgstr "" #: glance/common/glare/definitions.py:312 msgid "Array 
property can't have item_type=Array" msgstr "" #: glance/common/glare/definitions.py:384 msgid "Unable to specify artifact type explicitly" msgstr "" #: glance/common/glare/definitions.py:387 msgid "Unable to specify artifact type version explicitly" msgstr "" #: glance/common/glare/definitions.py:426 msgid "Unable to specify version if multiple types are possible" msgstr "" #: glance/common/glare/definitions.py:441 msgid "Invalid referenced type" msgstr "" #: glance/common/glare/definitions.py:444 msgid "Unable to specify version if type is not specified" msgstr "" #: glance/common/glare/definitions.py:462 msgid "Invalid reference list specification" msgstr "" #: glance/common/glare/definitions.py:522 msgid "BinaryObject property cannot be declared mutable" msgstr "" #: glance/common/glare/definitions.py:532 msgid "Blob size is not set" msgstr "" #: glance/common/glare/definitions.py:536 msgid "File too large" msgstr "" #: glance/common/glare/definitions.py:540 msgid "File too small" msgstr "" #: glance/common/glare/definitions.py:545 msgid "Too few locations" msgstr "" #: glance/common/glare/definitions.py:550 msgid "Too many locations" msgstr "" #: glance/common/glare/loader.py:31 msgid "" "When false, no artifacts can be loaded regardless of available_plugins. " "When true, artifacts can be loaded." msgstr "" #: glance/common/glare/loader.py:35 msgid "" "A list of artifacts that are allowed in the format name or name-version. " "Empty list means that any artifact can be loaded." msgstr "" #: glance/common/glare/serialization.py:192 #, python-format msgid "Blob %(name)s may not have multiple values" msgstr "" #: glance/common/glare/serialization.py:213 #, python-format msgid "Relation %(name)s may not have multiple values" msgstr "" #: glance/common/location_strategy/__init__.py:27 msgid "" "This value sets what strategy will be used to determine the image " "location order. Currently two strategies are packaged with Glance " "'location_order' and 'store_type'." 
msgstr "" #: glance/common/location_strategy/__init__.py:54 #, python-format msgid "" "%(strategy)s is registered as a module twice. %(module)s is not being " "used." msgstr "" #: glance/common/location_strategy/__init__.py:79 #, python-format msgid "" "Invalid location_strategy option: %(name)s. The valid strategy option(s) " "is(are): %(strategies)s" msgstr "" #: glance/common/location_strategy/store_type.py:28 msgid "" "The store names to use to get store preference order. The name must be " "registered by one of the stores defined by the 'stores' config option. " "This option will be applied when you using 'store_type' option as image " "location strategy defined by the 'location_strategy' config option." msgstr "" #: glance/common/scripts/utils.py:61 #, python-format msgid "Input does not contain '%(key)s' field" msgstr "" #: glance/common/scripts/utils.py:95 msgid "" "File based imports are not allowed. Please use a non-local source of " "image data." msgstr "" #: glance/common/scripts/utils.py:104 #, python-format msgid "" "The given uri is not valid. Please specify a valid uri from the following" " list of supported uri %(supported)s" msgstr "" #: glance/common/scripts/image_import/main.py:103 #, python-format msgid "" "The Image %(image_id)s object being created by this task %(task_id)s, is " "no longer in valid status for further processing." 
msgstr "" #: glance/contrib/plugins/image_artifact/v2/image.py:44 msgid "Either a file or a legacy_image_id has to be specified" msgstr "" #: glance/contrib/plugins/image_artifact/v2/image.py:49 msgid "Both file and legacy_image_id may not be specified at the same time" msgstr "" #: glance/contrib/plugins/image_artifact/v2/image.py:59 msgid "Glance client not installed" msgstr "" #: glance/contrib/plugins/image_artifact/v2/image.py:68 msgid "Unable to get legacy image" msgstr "" #: glance/contrib/plugins/image_artifact/v2/image.py:82 msgid "Legacy image was not found" msgstr "" #: glance/db/__init__.py:85 glance/db/__init__.py:127 glance/db/__init__.py:137 #: glance/db/__init__.py:151 #, python-format msgid "No artifact found with ID %s" msgstr "" #: glance/db/__init__.py:167 glance/db/__init__.py:279 #: glance/db/__init__.py:291 #, python-format msgid "No image found with ID %s" msgstr "" #: glance/db/__init__.py:345 #, python-format msgid "" "The target member %(member_id)s is already associated with image " "%(image_id)s." 
msgstr "" #: glance/db/__init__.py:376 #, python-format msgid "The specified member %s could not be found" msgstr "" #: glance/db/__init__.py:454 glance/db/__init__.py:475 #: glance/db/__init__.py:492 #, python-format msgid "Could not find task %s" msgstr "" #: glance/db/__init__.py:539 #, python-format msgid "Could not find namespace %s" msgstr "" #: glance/db/__init__.py:561 glance/db/__init__.py:571 #: glance/db/__init__.py:581 glance/db/__init__.py:591 #, python-format msgid "The specified namespace %s could not be found" msgstr "" #: glance/db/__init__.py:682 #, python-format msgid "Could not find metadata object %s" msgstr "" #: glance/db/__init__.py:705 #, python-format msgid "The specified metadata object %s could not be found" msgstr "" #: glance/db/__init__.py:798 #, python-format msgid "The specified resource type %s could not be found " msgstr "" #: glance/db/__init__.py:845 #, python-format msgid "Could not find property %s" msgstr "" #: glance/db/__init__.py:867 #, python-format msgid "The specified property %s could not be found" msgstr "" #: glance/db/__init__.py:931 #, python-format msgid "Could not find metadata tag %s" msgstr "" #: glance/db/__init__.py:957 #, python-format msgid "The specified metadata tag %s could not be found" msgstr "" #: glance/db/simple/api.py:293 glance/db/sqlalchemy/api.py:490 msgid "Unable to filter on a range with a non-numeric value." msgstr "" #: glance/db/simple/api.py:317 glance/db/sqlalchemy/api.py:523 #, python-format msgid "Unable to filter by unknown operator '%s'." msgstr "" #: glance/db/simple/api.py:509 glance/db/sqlalchemy/api.py:1180 msgid "Image id is required." 
msgstr "" #: glance/db/simple/api.py:570 glance/db/sqlalchemy/api.py:871 #, python-format msgid "The location data has an invalid ID: %d" msgstr "" #: glance/db/simple/api.py:590 glance/db/simple/api.py:616 #: glance/db/sqlalchemy/api.py:891 glance/db/sqlalchemy/api.py:917 #, python-format msgid "No location found with ID %(loc)s from image %(img)s" msgstr "" #: glance/db/simple/api.py:600 msgid "" "The status of deleted image location can only be set to 'pending_delete' " "or 'deleted'." msgstr "" #: glance/db/simple/api.py:941 #, python-format msgid "Forbidding request, task %s is not visible" msgstr "" #: glance/db/simple/api.py:1056 msgid "Task does not exist" msgstr "" #: glance/db/simple/api.py:1155 #: glance/db/sqlalchemy/metadef_api/namespace.py:249 #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition " "namespace with the same name of %s" msgstr "" #: glance/db/simple/api.py:1176 #: glance/db/sqlalchemy/metadef_api/namespace.py:88 #, python-format msgid "Metadata definition namespace not found for id=%s" msgstr "" #: glance/db/simple/api.py:1184 glance/db/simple/api.py:2128 #: glance/db/sqlalchemy/metadef_api/namespace.py:97 #: glance/db/sqlalchemy/metadef_api/namespace.py:119 #, python-format msgid "Forbidding request, metadata definition namespace=%s is not visible." msgstr "" #: glance/db/simple/api.py:1307 glance/db/sqlalchemy/metadef_api/object.py:35 #, python-format msgid "Metadata definition object not found for id=%s" msgstr "" #: glance/db/simple/api.py:1388 #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object" " with the same name=%(name)s in namespace=%(namespace_name)s." msgstr "" #: glance/db/simple/api.py:1515 #: glance/db/sqlalchemy/metadef_api/property.py:122 #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition " "property with the same name=%(name)s in namespace=%(namespace_name)s." 
msgstr "" #: glance/db/simple/api.py:1559 glance/db/sqlalchemy/metadef_api/property.py:37 #, python-format msgid "Metadata definition property not found for id=%s" msgstr "" #: glance/db/simple/api.py:1765 #, python-format msgid "Metadata definition tag not found for id=%s" msgstr "" #: glance/db/sqlalchemy/api.py:117 msgid "You do not own this image" msgstr "" #: glance/db/sqlalchemy/api.py:416 msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "" #: glance/db/sqlalchemy/api.py:505 #, python-format msgid "Bad \"%s\" query filter format. Use ISO 8601 DateTime notation." msgstr "" #: glance/db/sqlalchemy/api.py:820 #: glance/tests/unit/v2/test_registry_client.py:582 #, python-format msgid "" "cannot transition from %(current)s to %(next)s in update (wanted " "from_state=%(from)s)" msgstr "" #: glance/db/sqlalchemy/api.py:900 msgid "" "The status of deleted image location can only be set to 'pending_delete' " "or 'deleted'" msgstr "" #: glance/db/sqlalchemy/metadata.py:42 msgid "Path to the directory where json metadata files are stored" msgstr "" #: glance/db/sqlalchemy/metadef_api/object.py:112 #, python-format msgid "" "Invalid update. It would result in a duplicate metadata definition object" " with the same name=%(name)s in namespace=%(namespace_name)s." 
msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:89 #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:152 msgid "" "URI cannot contain more than one occurrence of a scheme.If you have " "specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj, you need to change" " it to use the swift+http:// scheme, like so: " "swift+http://user:pass@authurl.com/v1/container/obj" msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:128 #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:190 #, python-format msgid "Badly formed credentials '%(creds)s' in Swift URI" msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:140 #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:202 msgid "Badly formed credentials in Swift URI." msgstr "" #: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:157 #: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:219 #, python-format msgid "Badly formed S3 URI: %(uri)s" msgstr "" #: glance/domain/__init__.py:62 #, python-format msgid "new_image() got unexpected keywords %s" msgstr "" #: glance/domain/__init__.py:134 glance/glare/domain/__init__.py:43 #, python-format msgid "__init__() got unexpected keyword argument '%s'" msgstr "" #: glance/domain/__init__.py:156 #, python-format msgid "Property %s must be set prior to saving data." msgstr "" #: glance/domain/__init__.py:159 #, python-format msgid "Properties %s must be set prior to saving data." msgstr "" #: glance/domain/__init__.py:176 msgid "Visibility must be either \"public\" or \"private\"" msgstr "" #: glance/domain/__init__.py:195 msgid "Attribute container_format can be only replaced for a queued image." 
msgstr "" #: glance/domain/__init__.py:207 msgid "Attribute disk_format can be only replaced for a queued image." msgstr "" #: glance/domain/__init__.py:219 glance/domain/__init__.py:232 msgid "Cannot be a negative value" msgstr "" #: glance/domain/__init__.py:255 #, python-format msgid "Not allowed to deactivate image in status '%s'" msgstr "" #: glance/domain/__init__.py:268 #, python-format msgid "Not allowed to reactivate image in status '%s'" msgstr "" #: glance/domain/__init__.py:323 msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." msgstr "" #: glance/glare/dependency.py:66 #, python-format msgid "Not all dependencies are in '%s' state" msgstr "" #: glance/glare/dependency.py:98 #, python-format msgid "Dependency property '%s' has to be deleted first" msgstr "" #: glance/glare/location.py:159 #, python-format msgid "Get blob %(name)s data failed: %(err)s." msgstr "" #: glance/glare/updater.py:52 msgid "No property to access" msgstr "" #: glance/glare/updater.py:130 #, python-format msgid "Array has no element at position %d" msgstr "" #: glance/glare/updater.py:134 #, python-format msgid "Not an array idx '%s'" msgstr "" #: glance/glare/updater.py:174 #, python-format msgid "No such key '%s' in a dict" msgstr "" #: glance/image_cache/__init__.py:37 msgid "The driver to use for image cache management." msgstr "" #: glance/image_cache/__init__.py:39 msgid "" "The upper limit (the maximum size of accumulated cache in bytes) beyond " "which the cache pruner, if running, starts cleaning the image cache." msgstr "" #: glance/image_cache/__init__.py:43 msgid "" "The amount of time to let an incomplete image remain in the cache, before" " the cache cleaner, if running, will remove the incomplete image." msgstr "" #: glance/image_cache/__init__.py:47 msgid "Base directory that the image cache uses." msgstr "" #: glance/image_cache/__init__.py:258 #, python-format msgid "Checksum verification failed. Aborted caching of image '%s'." 
msgstr "" #: glance/image_cache/client.py:119 msgid "" "--os_auth_url option or OS_AUTH_URL environment variable required when " "keystone authentication strategy is enabled\n" msgstr "" #: glance/image_cache/drivers/base.py:55 #, python-format msgid "Failed to read %s from config" msgstr "" #: glance/image_cache/drivers/sqlite.py:41 msgid "" "The path to the sqlite file database that will be used for image cache " "management." msgstr "" #: glance/image_cache/drivers/sqlite.py:122 #, python-format msgid "Failed to initialize the image cache database. Got error: %s" msgstr "" #: glance/image_cache/drivers/xattr.py:107 #, python-format msgid "" "The device housing the image cache directory %(image_cache_dir)s does not" " support xattr. It is likely you need to edit your fstab and add the " "user_xattr option to the appropriate line for the device housing the " "cache directory." msgstr "" #: glance/registry/__init__.py:27 msgid "Address to find the registry server." msgstr "" #: glance/registry/__init__.py:29 msgid "Port the registry server is listening on." msgstr "" #: glance/registry/api/v1/images.py:126 glance/registry/api/v1/images.py:131 msgid "Invalid marker. Image could not be found." msgstr "" #: glance/registry/api/v1/images.py:237 msgid "Unrecognized changes-since value" msgstr "" #: glance/registry/api/v1/images.py:242 msgid "protected must be True, or False" msgstr "" #: glance/registry/api/v1/images.py:289 #, python-format msgid "Unsupported sort_key. Acceptable values: %s" msgstr "" #: glance/registry/api/v1/images.py:298 #, python-format msgid "Unsupported sort_dir. Acceptable values: %s" msgstr "" #: glance/registry/api/v1/images.py:324 msgid "is_public must be None, True, or False" msgstr "" #: glance/registry/api/v1/images.py:410 msgid "Invalid image id format" msgstr "" #: glance/registry/api/v1/images.py:424 #, python-format msgid "Image with identifier %s already exists!" 
msgstr "" #: glance/registry/api/v1/images.py:428 #, python-format msgid "Failed to add image metadata. Got error: %s" msgstr "" #: glance/registry/api/v1/members.py:68 glance/registry/api/v1/members.py:102 #: glance/registry/api/v1/members.py:222 glance/registry/api/v1/members.py:286 #, python-format msgid "Image %(id)s not found" msgstr "" #: glance/registry/api/v1/members.py:118 glance/registry/api/v1/members.py:238 #: glance/registry/api/v1/members.py:302 msgid "No permission to share that image" msgstr "" #: glance/registry/api/v1/members.py:129 glance/registry/api/v1/members.py:146 #: glance/registry/api/v1/members.py:251 #, python-format msgid "Invalid membership association: %s" msgstr "" #: glance/registry/api/v1/members.py:314 glance/registry/api/v1/members.py:338 msgid "Membership could not be found." msgstr "" #: glance/registry/api/v2/rpc.py:40 #, python-format msgid "Registry service can't use %s" msgstr "" #: glance/registry/client/__init__.py:23 msgid "" "The protocol to use for communication with the registry server. Either " "http or https." msgstr "" #: glance/registry/client/__init__.py:26 msgid "" "The path to the key file to use in SSL connections to the registry " "server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE " "environment variable to a filepath of the key file" msgstr "" #: glance/registry/client/__init__.py:31 msgid "" "The path to the cert file to use in SSL connections to the registry " "server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE " "environment variable to a filepath of the CA cert file" msgstr "" #: glance/registry/client/__init__.py:36 msgid "" "The path to the certifying authority cert file to use in SSL connections " "to the registry server, if any. Alternately, you may set the " "GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert " "file." 
msgstr "" #: glance/registry/client/__init__.py:42 msgid "" "When using SSL in connections to the registry server, do not require " "validation via a certifying authority. This is the registry's equivalent " "of specifying --insecure on the command line using glanceclient for the " "API." msgstr "" #: glance/registry/client/__init__.py:48 msgid "" "The period of time, in seconds, that the API server will wait for a " "registry request to complete. A value of 0 implies no timeout." msgstr "" #: glance/registry/client/__init__.py:64 msgid "" "Whether to pass through the user token when making requests to the " "registry. To prevent failures with token expiration during big files " "upload, it is recommended to set this parameter to False.If " "\"use_user_token\" is not in effect, then admin credentials can be " "specified." msgstr "" #: glance/registry/client/__init__.py:73 msgid "" "The administrators user name. If \"use_user_token\" is not in effect, " "then admin credentials can be specified." msgstr "" #: glance/registry/client/__init__.py:78 msgid "" "The administrators password. If \"use_user_token\" is not in effect, then" " admin credentials can be specified." msgstr "" #: glance/registry/client/__init__.py:83 msgid "" "The tenant name of the administrative user. If \"use_user_token\" is not " "in effect, then admin tenant name can be specified." msgstr "" #: glance/registry/client/__init__.py:88 msgid "" "The URL to the keystone service. If \"use_user_token\" is not in effect " "and using keystone auth, then URL of keystone can be specified." msgstr "" #: glance/registry/client/__init__.py:94 msgid "" "The strategy to use for authentication. If \"use_user_token\" is not in " "effect, then auth strategy can be specified." msgstr "" #: glance/registry/client/__init__.py:99 msgid "" "The region for the authentication service. If \"use_user_token\" is not " "in effect and using keystone auth, then region name can be specified." 
msgstr "" #: glance/registry/client/v1/api.py:79 glance/registry/client/v2/api.py:61 msgid "Configuration option was not valid" msgstr "" #: glance/registry/client/v1/api.py:83 glance/registry/client/v2/api.py:65 msgid "Could not find required configuration option" msgstr "" #: glance/tests/unit/test_migrations.py:619 #, python-format msgid "location: %s data lost" msgstr "" glance-12.0.0/glance/context.py0000664000567000056710000000440012701407051017446 0ustar jenkinsjenkins00000000000000# Copyright 2011-2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_context import context from glance.api import policy class RequestContext(context.RequestContext): """Stores information about the security context. Stores how the user accesses the system, as well as additional request information. 
""" def __init__(self, roles=None, owner_is_tenant=True, service_catalog=None, policy_enforcer=None, **kwargs): super(RequestContext, self).__init__(**kwargs) self.roles = roles or [] self.owner_is_tenant = owner_is_tenant self.service_catalog = service_catalog self.policy_enforcer = policy_enforcer or policy.Enforcer() if not self.is_admin: self.is_admin = self.policy_enforcer.check_is_admin(self) def to_dict(self): d = super(RequestContext, self).to_dict() d.update({ 'roles': self.roles, 'service_catalog': self.service_catalog, }) return d @classmethod def from_dict(cls, values): return cls(**values) @property def owner(self): """Return the owner to correlate with an image.""" return self.tenant if self.owner_is_tenant else self.user @property def can_see_deleted(self): """Admins can see deleted by default""" return self.show_deleted or self.is_admin def get_admin_context(show_deleted=False): """Create an administrator context.""" return RequestContext(auth_token=None, tenant=None, is_admin=True, show_deleted=show_deleted, overwrite=False) glance-12.0.0/glance/hacking/0000775000567000056710000000000012701407204017016 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/hacking/__init__.py0000664000567000056710000000000012701407047021122 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/hacking/checks.py0000664000567000056710000001515612701407047020645 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re import pep8 """ Guidelines for writing new hacking checks - Use only for Glance-specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range G3xx. Find the current test with the highest allocated number and then pick the next value. If nova has an N3xx code for that test, use the same number. - Keep the test method code in the source file ordered based on the G3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to glance/tests/test_hacking.py """ asse_trueinst_re = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " "(\w|\.|\'|\"|\[|\])+\)\)") asse_equal_type_re = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " "(\w|\.|\'|\"|\[|\])+\)") asse_equal_end_with_none_re = re.compile( r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)") asse_equal_start_with_none_re = re.compile( r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)") unicode_func_re = re.compile(r"(\s|\W|^)unicode\(") log_translation = re.compile( r"(.)*LOG\.(audit)\(\s*('|\")") log_translation_info = re.compile( r"(.)*LOG\.(info)\(\s*(_\(|'|\")") log_translation_exception = re.compile( r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") log_translation_error = re.compile( r"(.)*LOG\.(error)\(\s*(_\(|'|\")") log_translation_critical = re.compile( r"(.)*LOG\.(critical)\(\s*(_\(|'|\")") log_translation_warning = re.compile( r"(.)*LOG\.(warning)\(\s*(_\(|'|\")") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") def assert_true_instance(logical_line): """Check for assertTrue(isinstance(a, b)) sentences G316 """ if asse_trueinst_re.match(logical_line): yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed") def assert_equal_type(logical_line): """Check for assertEqual(type(A), B) sentences G317 """ if asse_equal_type_re.match(logical_line): yield 
(0, "G317: assertEqual(type(A), B) sentences not allowed") def assert_equal_none(logical_line): """Check for assertEqual(A, None) or assertEqual(None, A) sentences G318 """ res = (asse_equal_start_with_none_re.match(logical_line) or asse_equal_end_with_none_re.match(logical_line)) if res: yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed") def no_translate_debug_logs(logical_line, filename): dirs = [ "glance/api", "glance/cmd", "glance/common", "glance/db", "glance/domain", "glance/image_cache", "glance/quota", "glance/registry", "glance/store", "glance/tests", ] if max([name in filename for name in dirs]): if logical_line.startswith("LOG.debug(_("): yield(0, "G319: Don't translate debug level logs") def no_direct_use_of_unicode_function(logical_line): """Check for use of unicode() builtin G320 """ if unicode_func_re.match(logical_line): yield(0, "G320: Use six.text_type() instead of unicode()") def validate_log_translations(logical_line, physical_line, filename): # Translations are not required in the test directory if pep8.noqa(physical_line): return msg = "G322: LOG.info messages require translations `_LI()`!" if log_translation_info.match(logical_line): yield (0, msg) msg = "G323: LOG.exception messages require translations `_LE()`!" if log_translation_exception.match(logical_line): yield (0, msg) msg = "G324: LOG.error messages require translations `_LE()`!" if log_translation_error.match(logical_line): yield (0, msg) msg = "G325: LOG.critical messages require translations `_LC()`!" if log_translation_critical.match(logical_line): yield (0, msg) msg = "G326: LOG.warning messages require translations `_LW()`!" if log_translation_warning.match(logical_line): yield (0, msg) msg = "G321: Log messages require translations!" if log_translation.match(logical_line): yield (0, msg) def check_no_contextlib_nested(logical_line): msg = ("G327: contextlib.nested is deprecated since Python 2.7. 
See " "https://docs.python.org/2/library/contextlib.html#contextlib." "nested for more information.") if ("with contextlib.nested(" in logical_line or "with nested(" in logical_line): yield(0, msg) def dict_constructor_with_list_copy(logical_line): msg = ("G328: Must use a dict comprehension instead of a dict constructor " "with a sequence of key-value pairs.") if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) def check_python3_xrange(logical_line): if re.search(r"\bxrange\s*\(", logical_line): yield(0, "G329: Do not use xrange. Use range, or six.moves.range for " "large loops.") def check_python3_no_iteritems(logical_line): msg = ("G330: Use six.iteritems() or dict.items() instead of " "dict.iteritems().") if re.search(r".*\.iteritems\(\)", logical_line): yield(0, msg) def check_python3_no_iterkeys(logical_line): msg = ("G331: Use six.iterkeys() or dict.keys() instead of " "dict.iterkeys().") if re.search(r".*\.iterkeys\(\)", logical_line): yield(0, msg) def check_python3_no_itervalues(logical_line): msg = ("G332: Use six.itervalues() or dict.values instead of " "dict.itervalues().") if re.search(r".*\.itervalues\(\)", logical_line): yield(0, msg) def factory(register): register(assert_true_instance) register(assert_equal_type) register(assert_equal_none) register(no_translate_debug_logs) register(no_direct_use_of_unicode_function) register(validate_log_translations) register(check_no_contextlib_nested) register(dict_constructor_with_list_copy) register(check_python3_xrange) register(check_python3_no_iteritems) register(check_python3_no_iterkeys) register(check_python3_no_itervalues) glance-12.0.0/glance/async/0000775000567000056710000000000012701407204016527 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/async/utils.py0000664000567000056710000000565112701407047020255 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import encodeutils from taskflow import task from glance.i18n import _LW LOG = logging.getLogger(__name__) class OptionalTask(task.Task): def __init__(self, *args, **kwargs): super(OptionalTask, self).__init__(*args, **kwargs) self.execute = self._catch_all(self.execute) def _catch_all(self, func): # NOTE(flaper87): Read this comment before calling the MI6 # Here's the thing, there's no nice way to define "optional" # tasks. That is, tasks whose failure shouldn't affect the execution # of the flow. The only current "sane" way to do this, is by catching # everything and logging. This seems harmless from a taskflow # perspective but it is not. There are some issues related to this # "workaround": # # - Task's states will shamelessly lie to us saying the task succeeded. # # - No revert procedure will be triggered, which means optional tasks, # for now, mustn't cause any side-effects because they won't be able to # clean them up. If these tasks depend on other task that do cause side # effects, a task that cleans those side effects most be registered as # well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS. # # - Ideally, optional tasks shouldn't `provide` new values unless they # are part of an optional flow. Due to the decoration of the execute # method, these tasks will need to define the provided methods at # class level using `default_provides`. 
# # # The taskflow team is working on improving this and on something that # will provide the ability of defining optional tasks. For now, to lie # ourselves we must. # # NOTE(harlowja): The upstream change that is hopefully going to make # this easier/built-in is at: https://review.openstack.org/#/c/271116/ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exc: msg = (_LW("An optional task has failed, " "the failure was: %s") % encodeutils.exception_to_unicode(exc)) LOG.warn(msg) return wrapper glance-12.0.0/glance/async/__init__.py0000664000567000056710000000533512701407047020653 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from glance.i18n import _LE LOG = logging.getLogger(__name__) class TaskExecutor(object): """Base class for Asynchronous task executors. It does not support the execution mechanism. Provisions the extensible classes with necessary variables to utilize important Glance modules like, context, task_repo, image_repo, image_factory. Note: It also gives abstraction for the standard pre-processing and post-processing operations to be executed by a task. These may include validation checks, security checks, introspection, error handling etc. The aim is to give developers an abstract sense of the execution pipeline logic. 
Args: context: glance.context.RequestContext object for AuthZ and AuthN checks task_repo: glance.db.TaskRepo object which acts as a translator for glance.domain.Task and glance.domain.TaskStub objects into ORM semantics image_repo: glance.db.ImageRepo object which acts as a translator for glance.domain.Image object into ORM semantics image_factory: glance.domain.ImageFactory object to be used for creating new images for certain types of tasks viz. import, cloning """ def __init__(self, context, task_repo, image_repo, image_factory): self.context = context self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory def begin_processing(self, task_id): task = self.task_repo.get(task_id) task.begin_processing() self.task_repo.save(task) # start running self._run(task_id, task.type) def _run(self, task_id, task_type): task = self.task_repo.get(task_id) msg = _LE("This execution of Tasks is not setup. Please consult the " "project documentation for more information on the " "executors available.") LOG.error(msg) task.fail(_LE("Internal error occurred while trying to process task.")) self.task_repo.save(task) glance-12.0.0/glance/async/flows/0000775000567000056710000000000012701407204017661 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/async/flows/base_import.py0000664000567000056710000004555512701407047022562 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import json import logging import os import glance_store as store_api from glance_store import backend from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_utils import encodeutils from oslo_utils import excutils import six from stevedore import named from taskflow.patterns import linear_flow as lf from taskflow import retry from taskflow import task from taskflow.types import failure from glance.common import exception from glance.common.scripts.image_import import main as image_import from glance.common.scripts import utils as script_utils from glance.i18n import _, _LE, _LI LOG = logging.getLogger(__name__) CONF = cfg.CONF class _CreateImage(task.Task): default_provides = 'image_id' def __init__(self, task_id, task_type, task_repo, image_repo, image_factory): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory super(_CreateImage, self).__init__( name='%s-CreateImage-%s' % (task_type, task_id)) def execute(self): task = script_utils.get_task(self.task_repo, self.task_id) if task is None: return task_input = script_utils.unpack_task_input(task) image = image_import.create_image( self.image_repo, self.image_factory, task_input.get('image_properties'), self.task_id) LOG.debug("Task %(task_id)s created image %(image_id)s", {'task_id': task.task_id, 'image_id': image.image_id}) return image.image_id def revert(self, *args, **kwargs): # TODO(flaper87): Define the revert rules for images on failures. # Deleting the image may not be what we want since users could upload # the image data in a separate step. However, it really depends on # when the failure happened. I guess we should check if data has been # written, although at that point failures are (should be) unexpected, # at least image-workflow wise. 
pass class _ImportToFS(task.Task): default_provides = 'file_path' def __init__(self, task_id, task_type, task_repo, uri): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo self.uri = uri super(_ImportToFS, self).__init__( name='%s-ImportToFS-%s' % (task_type, task_id)) if CONF.task.work_dir is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. Missing work dir: %(work_dir)s") % {'task_id': self.task_id, 'task_type': self.task_type, 'work_dir': CONF.task.work_dir}) raise exception.BadTaskConfiguration(msg) self.store = self._build_store() def _build_store(self): # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're # forced to build our own config object, register the required options # (and by required I mean *ALL* of them, even the ones we don't want), # and create our own store instance by calling a private function. # This is certainly unfortunate but it's the best we can do until the # glance_store refactor is done. A good thing is that glance_store is # under our team's management and it gates on Glance so changes to # this API will (should?) break task's tests. conf = cfg.ConfigOpts() backend.register_opts(conf) conf.set_override('filesystem_store_datadir', CONF.task.work_dir, group='glance_store', enforce_type=True) # NOTE(flaper87): Do not even try to judge me for this... :( # With the glance_store refactor, this code will change, until # that happens, we don't have a better option and this is the # least worst one, IMHO. store = backend._load_store(conf, 'file') if store is None: msg = (_("%(task_id)s of %(task_type)s not configured " "properly. 
Could not load the filesystem store") % {'task_id': self.task_id, 'task_type': self.task_type}) raise exception.BadTaskConfiguration(msg) store.configure() return store def execute(self, image_id): """Create temp file into store and return path to it :param image_id: Glance Image ID """ # NOTE(flaper87): We've decided to use a separate `work_dir` for # this task - and tasks coming after this one - as a way to expect # users to configure a local store for pre-import works on the image # to happen. # # While using any path should be "technically" fine, it's not what # we recommend as the best solution. For more details on this, please # refer to the comment in the `_ImportToStore.execute` method. data = script_utils.get_image_data_iter(self.uri) path = self.store.add(image_id, data, 0, context=None)[0] try: # NOTE(flaper87): Consider moving this code to a common # place that other tasks can consume as well. stdout, stderr = putils.trycmd('qemu-img', 'info', '--output=json', path, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = (_LE('Failed to execute security checks on the image ' '%(task_id)s: %(exc)s') % {'task_id': self.task_id, 'exc': exc_message}) LOG.error(msg) metadata = json.loads(stdout) backing_file = metadata.get('backing-filename') if backing_file is not None: msg = _("File %(path)s has invalid backing file " "%(bfile)s, aborting.") % {'path': path, 'bfile': backing_file} raise RuntimeError(msg) return path def revert(self, image_id, result, **kwargs): if isinstance(result, failure.Failure): LOG.exception(_LE('Task: %(task_id)s failed to import image ' '%(image_id)s to the filesystem.') % {'task_id': self.task_id, 'image_id': image_id}) return if os.path.exists(result.split("file://")[-1]): store_api.delete_from_backend(result) class _DeleteFromFS(task.Task): def __init__(self, task_id, task_type): self.task_id = task_id self.task_type = 
task_type super(_DeleteFromFS, self).__init__( name='%s-DeleteFromFS-%s' % (task_type, task_id)) def execute(self, file_path): """Remove file from the backend :param file_path: path to the file being deleted """ store_api.delete_from_backend(file_path) class _ImportToStore(task.Task): def __init__(self, task_id, task_type, image_repo, uri): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo self.uri = uri super(_ImportToStore, self).__init__( name='%s-ImportToStore-%s' % (task_type, task_id)) def execute(self, image_id, file_path=None): """Bringing the introspected image to back end store :param image_id: Glance Image ID :param file_path: path to the image file """ # NOTE(flaper87): There are a couple of interesting bits in the # interaction between this task and the `_ImportToFS` one. I'll try # to cover them in this comment. # # NOTE(flaper87): # `_ImportToFS` downloads the image to a dedicated `work_dir` which # needs to be configured in advance (please refer to the config option # docs for more info). The motivation behind this is also explained in # the `_ImportToFS.execute` method. # # Due to the fact that we have an `_ImportToFS` task which downloads # the image data already, we need to be as smart as we can in this task # to avoid downloading the data several times and reducing the copy or # write times. There are several scenarios where the interaction # between this task and `_ImportToFS` could be improved. All these # scenarios assume the `_ImportToFS` task has been executed before # and/or in a more abstract scenario, that `file_path` is being # provided. # # Scenario 1: FS Store is Remote, introspection enabled, # conversion disabled # # In this scenario, the user would benefit from having the scratch path # being the same path as the fs store. Only one write would happen and # an extra read will happen in order to introspect the image. Note that # this read is just for the image headers and not the entire file. 
# # Scenario 2: FS Store is remote, introspection enabled, # conversion enabled # # In this scenario, the user would benefit from having a *local* store # into which the image can be converted. This will require downloading # the image locally, converting it and then copying the converted image # to the remote store. # # Scenario 3: FS Store is local, introspection enabled, # conversion disabled # Scenario 4: FS Store is local, introspection enabled, # conversion enabled # # In both these scenarios the user shouldn't care if the FS # store path and the work dir are the same, therefore probably # benefit, about the scratch path and the FS store being the # same from a performance perspective. Space wise, regardless # of the scenario, the user will have to account for it in # advance. # # Lets get to it and identify the different scenarios in the # implementation image = self.image_repo.get(image_id) image.status = 'saving' self.image_repo.save(image) # NOTE(flaper87): Let's dance... and fall # # Unfortunatelly, because of the way our domain layers work and # the checks done in the FS store, we can't simply rename the file # and set the location. To do that, we'd have to duplicate the logic # of every and each of the domain factories (quota, location, etc) # and we'd also need to hack the FS store to prevent it from raising # a "duplication path" error. I'd rather have this task copying the # image bits one more time than duplicating all that logic. # # Since I don't think this should be the definitive solution, I'm # leaving the code below as a reference for what should happen here # once the FS store and domain code will be able to handle this case. # # if file_path is None: # image_import.set_image_data(image, self.uri, None) # return # NOTE(flaper87): Don't assume the image was stored in the # work_dir. Think in the case this path was provided by another task. 
# Also, lets try to neither assume things nor create "logic" # dependencies between this task and `_ImportToFS` # # base_path = os.path.dirname(file_path.split("file://")[-1]) # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say # hopefully because nothing prevents the user to use the same # FS store path as a work dir # # image_path = os.path.join(base_path, image_id) # # if (base_path == CONF.glance_store.filesystem_store_datadir or # base_path in CONF.glance_store.filesystem_store_datadirs): # os.rename(file_path, image_path) # # image_import.set_image_data(image, image_path, None) image_import.set_image_data(image, file_path or self.uri, self.task_id) # NOTE(flaper87): We need to save the image again after the locations # have been set in the image. self.image_repo.save(image) class _SaveImage(task.Task): def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_SaveImage, self).__init__( name='%s-SaveImage-%s' % (task_type, task_id)) def execute(self, image_id): """Transition image status to active :param image_id: Glance Image ID """ new_image = self.image_repo.get(image_id) if new_image.status == 'saving': # NOTE(flaper87): THIS IS WRONG! # we should be doing atomic updates to avoid # race conditions. This happens in other places # too. 
new_image.status = 'active' self.image_repo.save(new_image) class _CompleteTask(task.Task): def __init__(self, task_id, task_type, task_repo): self.task_id = task_id self.task_type = task_type self.task_repo = task_repo super(_CompleteTask, self).__init__( name='%s-CompleteTask-%s' % (task_type, task_id)) def execute(self, image_id): """Finishing the task flow :param image_id: Glance Image ID """ task = script_utils.get_task(self.task_repo, self.task_id) if task is None: return try: task.succeed({'image_id': image_id}) except Exception as e: # Note: The message string contains Error in it to indicate # in the task.message that it's a error message for the user. # TODO(nikhil): need to bring back save_and_reraise_exception when # necessary err_msg = ("Error: " + six.text_type(type(e)) + ': ' + encodeutils.exception_to_unicode(e)) log_msg = err_msg + _LE("Task ID %s") % task.task_id LOG.exception(log_msg) task.fail(err_msg) finally: self.task_repo.save(task) LOG.info(_LI("%(task_id)s of %(task_type)s completed"), {'task_id': self.task_id, 'task_type': self.task_type}) def _get_import_flows(**kwargs): # NOTE(flaper87): Until we have a better infrastructure to enable # and disable tasks plugins, hard-code the tasks we know exist, # instead of loading everything from the namespace. This guarantees # both, the load order of these plugins and the fact that no random # plugins will be added/loaded until we feel comfortable with this. # Future patches will keep using NamedExtensionManager but they'll # rely on a config option to control this process. 
extensions = named.NamedExtensionManager('glance.flows.import', names=['ovf_process', 'convert', 'introspect'], name_order=True, invoke_on_load=True, invoke_kwds=kwargs) for ext in extensions.extensions: yield ext.obj def get_flow(**kwargs): """Return task flow :param task_id: Task ID :param task_type: Type of the task :param task_repo: Task repo :param image_repo: Image repository used :param image_factory: Glance Image Factory :param uri: uri for the image file """ task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') task_repo = kwargs.get('task_repo') image_repo = kwargs.get('image_repo') image_factory = kwargs.get('image_factory') uri = kwargs.get('uri') flow = lf.Flow(task_type, retry=retry.AlwaysRevert()).add( _CreateImage(task_id, task_type, task_repo, image_repo, image_factory)) import_to_store = _ImportToStore(task_id, task_type, image_repo, uri) try: # NOTE(flaper87): ImportToLocal and DeleteFromLocal shouldn't be here. # Ideally, we should have the different import flows doing this for us # and this function should clean up duplicated tasks. For example, say # 2 flows need to have a local copy of the image - ImportToLocal - in # order to be able to complete the task - i.e Introspect-. In that # case, the introspect.get_flow call should add both, ImportToLocal and # DeleteFromLocal, to the flow and this function will reduce the # duplicated calls to those tasks by creating a linear flow that # ensures those are called before the other tasks. For now, I'm # keeping them here, though. limbo = lf.Flow(task_type).add(_ImportToFS(task_id, task_type, task_repo, uri)) for subflow in _get_import_flows(**kwargs): limbo.add(subflow) # NOTE(flaper87): We have hard-coded 2 tasks, # if there aren't more than 2, it means that # no subtask has been registered. if len(limbo) > 1: flow.add(limbo) # NOTE(flaper87): Until this implementation gets smarter, # make sure ImportToStore is called *after* the imported # flow stages. 
If not, the image will be set to saving state # invalidating tasks like Introspection or Convert. flow.add(import_to_store) # NOTE(flaper87): Since this is an "optional" task but required # when `limbo` is executed, we're adding it in its own subflow # to isolate it from the rest of the flow. delete_flow = lf.Flow(task_type).add(_DeleteFromFS(task_id, task_type)) flow.add(delete_flow) else: flow.add(import_to_store) except exception.BadTaskConfiguration: # NOTE(flaper87): If something goes wrong with the load of # import tasks, make sure we go on. flow.add(import_to_store) flow.add( _SaveImage(task_id, task_type, image_repo), _CompleteTask(task_id, task_type, task_repo) ) return flow glance-12.0.0/glance/async/flows/__init__.py0000664000567000056710000000000012701407047021765 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/async/flows/ovf_process.py0000664000567000056710000002443512701407051022573 0ustar jenkinsjenkins00000000000000# Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import os import re import shutil import tarfile try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET from oslo_config import cfg from oslo_serialization import jsonutils as json from six.moves import urllib from taskflow.patterns import linear_flow as lf from taskflow import task from glance import i18n _ = i18n._ _LE = i18n._LE _LW = i18n._LW LOG = logging.getLogger(__name__) CONF = cfg.CONF # Define the CIM namespaces here. Currently we will be supporting extracting # properties only from CIM_ProcessorAllocationSettingData CIM_NS = {'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' 'CIM_ProcessorAllocationSettingData': 'cim_pasd'} class _OVF_Process(task.Task): """ Extracts the single disk image from an OVA tarball and saves it to the Glance image store. It also parses the included OVF file for selected metadata which it then saves in the image store as the previously saved image's properties. """ default_provides = 'file_path' def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_OVF_Process, self).__init__( name='%s-OVF_Process-%s' % (task_type, task_id)) def _get_extracted_file_path(self, image_id): return os.path.join(CONF.task.work_dir, "%s.extracted" % image_id) def _get_ova_iter_objects(self, uri): """Returns iterable object either for local file or uri :param uri: uri (remote or local) to the ova package we want to iterate """ if uri.startswith("file://"): uri = uri.split("file://")[-1] return open(uri, "rb") return urllib.request.urlopen(uri) def execute(self, image_id, file_path): """ :param image_id: Id to use when storing extracted image to Glance image store. It is assumed that some other task has already created a row in the store with this id. 
:param file_path: Path to the OVA package """ image = self.image_repo.get(image_id) # Expect 'ova' as image container format for OVF_Process task if image.container_format == 'ova': # FIXME(dramakri): This is an admin-only feature for security # reasons. Ideally this should be achieved by making the import # task API admin only. This is one of the items that the upcoming # import refactoring work plans to do. Until then, we will check # the context as a short-cut. if image.context and image.context.is_admin: extractor = OVAImageExtractor() data_iter = self._get_ova_iter_objects(file_path) disk, properties = extractor.extract(data_iter) image.extra_properties.update(properties) image.container_format = 'bare' self.image_repo.save(image) dest_path = self._get_extracted_file_path(image_id) with open(dest_path, 'wb') as f: shutil.copyfileobj(disk, f, 4096) # Overwrite the input ova file since it is no longer needed os.rename(dest_path, file_path.split("file://")[-1]) else: raise RuntimeError(_('OVA extract is limited to admin')) return file_path def revert(self, image_id, result, **kwargs): fs_path = self._get_extracted_file_path(image_id) if os.path.exists(fs_path): os.path.remove(fs_path) class OVAImageExtractor(object): """Extracts and parses the uploaded OVA package A class that extracts the disk image and OVF file from an OVA tar archive. Parses the OVF file for metadata of interest. """ def __init__(self): self.interested_properties = [] self._load_interested_properties() def extract(self, ova): """Extracts disk image and OVF file from OVA package Extracts a single disk image and OVF from OVA tar archive and calls OVF parser method. 
:param ova: a file object containing the OVA file :returns: a tuple of extracted disk file object and dictionary of properties parsed from the OVF file :raises: RuntimeError for malformed OVA and OVF files """ with tarfile.open(fileobj=ova) as tar_file: filenames = tar_file.getnames() ovf_filename = next((filename for filename in filenames if filename.endswith('.ovf')), None) if ovf_filename: ovf = tar_file.extractfile(ovf_filename) disk_name, properties = self._parse_OVF(ovf) ovf.close() else: raise RuntimeError(_('Could not find OVF file in OVA archive ' 'file.')) disk = tar_file.extractfile(disk_name) return (disk, properties) def _parse_OVF(self, ovf): """Parses the OVF file Parses the OVF file for specified metadata properties. Interested properties must be specfied in ovf-metadata.json conf file. The OVF file's qualified namespaces are removed from the included properties. :param ovf: a file object containing the OVF file :returns: a tuple of disk filename and a properties dictionary :raises: RuntimeError for malformed OVF file """ def _get_namespace_and_tag(tag): """Separate and return the namespace and tag elements. There is no native support for this operation in elementtree package. See http://bugs.python.org/issue18304 for details. """ m = re.match(r'\{(.+)\}(.+)', tag) if m: return m.group(1), m.group(2) else: return '', tag disk_filename, file_elements, file_ref = None, None, None properties = {} for event, elem in ET.iterparse(ovf): if event == 'end': ns, tag = _get_namespace_and_tag(elem.tag) if ns in CIM_NS and tag in self.interested_properties: properties[CIM_NS[ns] + '_' + tag] = (elem.text.strip() if elem.text else '') if tag == 'DiskSection': disks = [child for child in list(elem) if _get_namespace_and_tag(child.tag)[1] == 'Disk'] if len(disks) > 1: """ Currently only single disk image extraction is supported. 
FIXME(dramakri): Support multiple images in OVA package """ raise RuntimeError(_('Currently, OVA packages ' 'containing multiple disk are ' 'not supported.')) disk = next(iter(disks)) file_ref = next(value for key, value in disk.items() if _get_namespace_and_tag(key)[1] == 'fileRef') if tag == 'References': file_elements = list(elem) # Clears elements to save memory except for 'File' and 'Disk' # references, which we will need to later access if tag != 'File' and tag != 'Disk': elem.clear() for file_element in file_elements: file_id = next(value for key, value in file_element.items() if _get_namespace_and_tag(key)[1] == 'id') if file_id != file_ref: continue disk_filename = next(value for key, value in file_element.items() if _get_namespace_and_tag(key)[1] == 'href') return (disk_filename, properties) def _load_interested_properties(self): """Find the OVF properties config file and load it. OVF properties config file specifies which metadata of interest to extract. Reads in a JSON file named 'ovf-metadata.json' if available. See example file at etc/ovf-metadata.json.sample. """ filename = 'ovf-metadata.json' match = CONF.find_file(filename) if match: with open(match, 'r') as properties_file: properties = json.loads(properties_file.read()) self.interested_properties = properties.get( 'cim_pasd', []) if not self.interested_properties: LOG.warn(_('OVF metadata of interest was not specified ' 'in ovf-metadata.json config file. Please set ' '"cim_pasd" to a list of interested ' 'CIM_ProcessorAllocationSettingData ' 'properties.')) else: LOG.warn(_('OVF properties config file "ovf-metadata.json" was ' 'not found.')) def get_flow(**kwargs): """Returns task flow for OVF Process. :param task_id: Task ID :param task_type: Type of the task. :param image_repo: Image repository used. 
""" task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s" % {'task_type': task_type, 'id': task_id, 'repo': image_repo}) return lf.Flow(task_type).add( _OVF_Process(task_id, task_type, image_repo), ) glance-12.0.0/glance/async/flows/introspect.py0000664000567000056710000000656412701407047022445 0ustar jenkinsjenkins00000000000000# Copyright 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import logging from oslo_concurrency import processutils as putils from oslo_utils import encodeutils from oslo_utils import excutils from taskflow.patterns import linear_flow as lf from glance.async import utils from glance.i18n import _LE LOG = logging.getLogger(__name__) class _Introspect(utils.OptionalTask): """Taskflow to pull the embedded metadata out of image file""" def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Introspect, self).__init__( name='%s-Introspect-%s' % (task_type, task_id)) def execute(self, image_id, file_path): """Does the actual introspection :param image_id: Glance image ID :param file_path: Path to the file being introspected """ try: stdout, stderr = putils.trycmd('qemu-img', 'info', '--output=json', file_path, log_errors=putils.LOG_ALL_ERRORS) except OSError as exc: # NOTE(flaper87): errno == 2 means the executable file # was not found. For now, log an error and move forward # until we have a better way to enable/disable optional # tasks. if exc.errno != 2: with excutils.save_and_reraise_exception(): exc_message = encodeutils.exception_to_unicode(exc) msg = (_LE('Failed to execute introspection ' '%(task_id)s: %(exc)s') % {'task_id': self.task_id, 'exc': exc_message}) LOG.error(msg) return if stderr: raise RuntimeError(stderr) metadata = json.loads(stdout) new_image = self.image_repo.get(image_id) new_image.virtual_size = metadata.get('virtual-size', 0) new_image.disk_format = metadata.get('format') self.image_repo.save(new_image) LOG.debug("%(task_id)s: Introspection successful: %(file)s", {'task_id': self.task_id, 'file': file_path}) return new_image def get_flow(**kwargs): """Return task flow for introspecting images to obtain metadata about the image. :param task_id: Task ID :param task_type: Type of the task. :param image_repo: Image repository used. 
""" task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s", {'task_type': task_type, 'id': task_id, 'repo': image_repo}) return lf.Flow(task_type).add( _Introspect(task_id, task_type, image_repo), ) glance-12.0.0/glance/async/flows/convert.py0000664000567000056710000000755712701407047021736 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os from oslo_concurrency import processutils as putils from oslo_config import cfg from taskflow.patterns import linear_flow as lf from taskflow import task from glance.i18n import _, _LW LOG = logging.getLogger(__name__) convert_task_opts = [ cfg.StrOpt('conversion_format', choices=('qcow2', 'raw', 'vmdk'), help=_("The format to which images will be automatically " "converted. When using the RBD backend, this should be " "set to 'raw'")), ] CONF = cfg.CONF # NOTE(flaper87): Registering under the taskflow_executor section # for now. It seems a waste to have a whole section dedicated to a # single task with a single option. 
CONF.register_opts(convert_task_opts, group='taskflow_executor') class _Convert(task.Task): conversion_missing_warned = False def __init__(self, task_id, task_type, image_repo): self.task_id = task_id self.task_type = task_type self.image_repo = image_repo super(_Convert, self).__init__( name='%s-Convert-%s' % (task_type, task_id)) def execute(self, image_id, file_path): # NOTE(flaper87): A format must be explicitly # specified. There's no "sane" default for this # because the dest format may work differently depending # on the environment OpenStack is running in. conversion_format = CONF.taskflow_executor.conversion_format if conversion_format is None: if not _Convert.conversion_missing_warned: msg = (_LW('The conversion format is None, please add a value ' 'for it in the config file for this task to ' 'work: %s') % self.task_id) LOG.warn(msg) _Convert.conversion_missing_warned = True return # TODO(flaper87): Check whether the image is in the desired # format already. Probably using `qemu-img` just like the # `Introspection` task. dest_path = os.path.join(CONF.task.work_dir, "%s.converted" % image_id) stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O', conversion_format, file_path, dest_path, log_errors=putils.LOG_ALL_ERRORS) if stderr: raise RuntimeError(stderr) os.rename(dest_path, file_path.split("file://")[-1]) return file_path def revert(self, image_id, result=None, **kwargs): # NOTE(flaper87): If result is None, it probably # means this task failed. Otherwise, we would have # a result from its execution. if result is None: return fs_path = result.split("file://")[-1] if os.path.exists(fs_path): os.remove(fs_path) def get_flow(**kwargs): """Return task flow for converting images to different formats. :param task_id: Task ID. :param task_type: Type of the task. :param image_repo: Image repository used. 
""" task_id = kwargs.get('task_id') task_type = kwargs.get('task_type') image_repo = kwargs.get('image_repo') return lf.Flow(task_type).add( _Convert(task_id, task_type, image_repo), ) glance-12.0.0/glance/async/taskflow_executor.py0000664000567000056710000001336012701407047022661 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import futurist from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from six.moves import urllib from stevedore import driver from taskflow import engines from taskflow.listeners import logging as llistener import glance.async from glance.common import exception from glance.common.scripts import utils as script_utils from glance.i18n import _, _LE LOG = logging.getLogger(__name__) _deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size', group='task') taskflow_executor_opts = [ cfg.StrOpt('engine_mode', default='parallel', choices=('serial', 'parallel'), help=_("The mode in which the engine will run. " "Can be 'serial' or 'parallel'.")), cfg.IntOpt('max_workers', default=10, help=_("The number of parallel activities executed at the " "same time by the engine. 
The value can be greater " "than one when the engine mode is 'parallel'."), deprecated_opts=[_deprecated_opt]) ] CONF = cfg.CONF CONF.register_opts(taskflow_executor_opts, group='taskflow_executor') class TaskExecutor(glance.async.TaskExecutor): def __init__(self, context, task_repo, image_repo, image_factory): self.context = context self.task_repo = task_repo self.image_repo = image_repo self.image_factory = image_factory super(TaskExecutor, self).__init__(context, task_repo, image_repo, image_factory) @staticmethod def _fetch_an_executor(): if CONF.taskflow_executor.engine_mode != 'parallel': return None else: max_workers = CONF.taskflow_executor.max_workers try: return futurist.GreenThreadPoolExecutor( max_workers=max_workers) except RuntimeError: # NOTE(harlowja): I guess eventlet isn't being made # useable, well just use native threads then (or try to). return futurist.ThreadPoolExecutor(max_workers=max_workers) def _get_flow(self, task): try: task_input = script_utils.unpack_task_input(task) uri = script_utils.validate_location_uri( task_input.get('import_from')) kwds = { 'uri': uri, 'task_id': task.task_id, 'task_type': task.type, 'context': self.context, 'task_repo': self.task_repo, 'image_repo': self.image_repo, 'image_factory': self.image_factory } return driver.DriverManager('glance.flows', task.type, invoke_on_load=True, invoke_kwds=kwds).driver except urllib.error.URLError as exc: raise exception.ImportTaskError(message=exc.reason) except exception.BadStoreUri as exc: raise exception.ImportTaskError(message=exc.msg) except RuntimeError: raise NotImplementedError() def begin_processing(self, task_id): try: super(TaskExecutor, self).begin_processing(task_id) except exception.ImportTaskError as exc: LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') % {'task_id': task_id, 'exc': exc.msg}) task = self.task_repo.get(task_id) task.fail(exc.msg) self.task_repo.save(task) def _run(self, task_id, task_type): LOG.debug('Taskflow executor picked up the 
execution of task ID ' '%(task_id)s of task type ' '%(task_type)s', {'task_id': task_id, 'task_type': task_type}) task = script_utils.get_task(self.task_repo, task_id) if task is None: # NOTE: This happens if task is not found in the database. In # such cases, there is no way to update the task status so, # it's ignored here. return flow = self._get_flow(task) executor = self._fetch_an_executor() try: engine = engines.load( flow, engine=CONF.taskflow_executor.engine_mode, executor=executor, max_workers=CONF.taskflow_executor.max_workers) with llistener.DynamicLoggingListener(engine, log=LOG): engine.run() except Exception as exc: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') % {'task_id': task_id, 'exc': encodeutils.exception_to_unicode(exc)}) # TODO(sabari): Check for specific exceptions and update the # task failure message. task.fail(_('Task failed due to Internal Error')) self.task_repo.save(task) finally: if executor is not None: executor.shutdown() glance-12.0.0/glance/scrubber.py0000664000567000056710000003224012701407051017574 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import calendar import time import eventlet from glance_store import exceptions as store_exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six from glance.common import crypt from glance.common import exception from glance import context import glance.db as db_api from glance.i18n import _, _LE, _LI, _LW import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) scrubber_opts = [ cfg.IntOpt('scrub_time', default=0, help=_('The amount of time in seconds to delay before ' 'performing a delete.')), cfg.IntOpt('scrub_pool_size', default=1, help=_('The size of thread pool to be used for ' 'scrubbing images. The default is one, which ' 'signifies serial scrubbing. Any value above ' 'one indicates the max number of images that ' 'may be scrubbed in parallel.')), cfg.BoolOpt('delayed_delete', default=False, help=_('Turn on/off delayed delete.')), cfg.StrOpt('admin_role', default='admin', help=_('Role used to identify an authenticated user as ' 'administrator.')), cfg.BoolOpt('send_identity_headers', default=False, help=_("Whether to pass through headers containing user " "and tenant information when making requests to " "the registry. This allows the registry to use the " "context middleware without keystonemiddleware's " "auth_token middleware, removing calls to the keystone " "auth service. It is recommended that when using this " "option, secure communication between glance api and " "glance registry is ensured by means other than " "auth_token middleware.")), ] scrubber_cmd_opts = [ cfg.IntOpt('wakeup_time', default=300, help=_('Loop time between checking for new ' 'items to schedule for delete.')) ] scrubber_cmd_cli_opts = [ cfg.BoolOpt('daemon', short='D', default=False, help=_('Run as a long-running process. When not ' 'specified (the default) run the scrub operation ' 'once and then exits. 
When specified do not exit ' 'and run scrub on wakeup_time interval as ' 'specified in the config.')) ] CONF = cfg.CONF CONF.register_opts(scrubber_opts) CONF.import_opt('metadata_encryption_key', 'glance.common.config') class ScrubDBQueue(object): """Database-based image scrub queue class.""" def __init__(self): self.scrub_time = CONF.scrub_time self.metadata_encryption_key = CONF.metadata_encryption_key registry.configure_registry_client() registry.configure_registry_admin_creds() admin_user = CONF.admin_user admin_tenant = CONF.admin_tenant_name if CONF.send_identity_headers: # When registry is operating in trusted-auth mode roles = [CONF.admin_role] self.admin_context = context.RequestContext(user=admin_user, tenant=admin_tenant, auth_token=None, roles=roles) self.registry = registry.get_registry_client(self.admin_context) else: ctxt = context.RequestContext() self.registry = registry.get_registry_client(ctxt) admin_token = self.registry.auth_token self.admin_context = context.RequestContext(user=admin_user, tenant=admin_tenant, auth_token=admin_token) def add_location(self, image_id, location): """Adding image location to scrub queue. 
:param image_id: The opaque image identifier :param location: The opaque image location :retval A boolean value to indicate success or not """ loc_id = location.get('id') if loc_id: db_api.get_api().image_location_delete(self.admin_context, image_id, loc_id, 'pending_delete') return True else: return False def _get_images_page(self, marker): filters = {'deleted': True, 'is_public': 'none', 'status': 'pending_delete'} if marker: return self.registry.get_images_detailed(filters=filters, marker=marker) else: return self.registry.get_images_detailed(filters=filters) def _get_all_images(self): """Generator to fetch all appropriate images, paging as needed.""" marker = None while True: images = self._get_images_page(marker) if len(images) == 0: break marker = images[-1]['id'] for image in images: yield image def get_all_locations(self): """Returns a list of image id and location tuple from scrub queue. :retval a list of image id, location id and uri tuple from scrub queue """ ret = [] for image in self._get_all_images(): deleted_at = image.get('deleted_at') if not deleted_at: continue # NOTE: Strip off microseconds which may occur after the last '.,' # Example: 2012-07-07T19:14:34.974216 date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0] delete_time = calendar.timegm(time.strptime(date_str, "%Y-%m-%dT%H:%M:%S")) if delete_time + self.scrub_time > time.time(): continue for loc in image['location_data']: if loc['status'] != 'pending_delete': continue if self.metadata_encryption_key: uri = crypt.urlsafe_encrypt(self.metadata_encryption_key, loc['url'], 64) else: uri = loc['url'] ret.append((image['id'], loc['id'], uri)) return ret def has_image(self, image_id): """Returns whether the queue contains an image or not. 
:param image_id: The opaque image identifier :retval a boolean value to inform including or not """ try: image = self.registry.get_image(image_id) return image['status'] == 'pending_delete' except exception.NotFound: return False _db_queue = None def get_scrub_queue(): global _db_queue if not _db_queue: _db_queue = ScrubDBQueue() return _db_queue class Daemon(object): def __init__(self, wakeup_time=300, threads=100): LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s " "threads=%(threads)s"), {'wakeup_time': wakeup_time, 'threads': threads}) self.wakeup_time = wakeup_time self.event = eventlet.event.Event() # This pool is used for periodic instantiation of scrubber self.daemon_pool = eventlet.greenpool.GreenPool(threads) def start(self, application): self._run(application) def wait(self): try: self.event.wait() except KeyboardInterrupt: msg = _LI("Daemon Shutdown on KeyboardInterrupt") LOG.info(msg) def _run(self, application): LOG.debug("Running application") self.daemon_pool.spawn_n(application.run, self.event) eventlet.spawn_after(self.wakeup_time, self._run, application) LOG.debug("Next run scheduled in %s seconds", self.wakeup_time) class Scrubber(object): def __init__(self, store_api): LOG.info(_LI("Initializing scrubber with configuration: %s"), six.text_type({'registry_host': CONF.registry_host, 'registry_port': CONF.registry_port})) self.store_api = store_api registry.configure_registry_client() registry.configure_registry_admin_creds() # Here we create a request context with credentials to support # delayed delete when using multi-tenant backend storage admin_user = CONF.admin_user admin_tenant = CONF.admin_tenant_name if CONF.send_identity_headers: # When registry is operating in trusted-auth mode roles = [CONF.admin_role] self.admin_context = context.RequestContext(user=admin_user, tenant=admin_tenant, auth_token=None, roles=roles) self.registry = registry.get_registry_client(self.admin_context) else: ctxt = context.RequestContext() 
self.registry = registry.get_registry_client(ctxt) auth_token = self.registry.auth_token self.admin_context = context.RequestContext(user=admin_user, tenant=admin_tenant, auth_token=auth_token) self.db_queue = get_scrub_queue() self.pool = eventlet.greenpool.GreenPool(CONF.scrub_pool_size) def _get_delete_jobs(self): try: records = self.db_queue.get_all_locations() except Exception as err: LOG.error(_LE("Can not get scrub jobs from queue: %s") % encodeutils.exception_to_unicode(err)) return {} delete_jobs = {} for image_id, loc_id, loc_uri in records: if image_id not in delete_jobs: delete_jobs[image_id] = [] delete_jobs[image_id].append((image_id, loc_id, loc_uri)) return delete_jobs def run(self, event=None): delete_jobs = self._get_delete_jobs() if delete_jobs: list(self.pool.starmap(self._scrub_image, delete_jobs.items())) def _scrub_image(self, image_id, delete_jobs): if len(delete_jobs) == 0: return LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations."), {'id': image_id, 'count': len(delete_jobs)}) success = True for img_id, loc_id, uri in delete_jobs: try: self._delete_image_location_from_backend(img_id, loc_id, uri) except Exception: success = False if success: image = self.registry.get_image(image_id) if image['status'] == 'pending_delete': self.registry.update_image(image_id, {'status': 'deleted'}) LOG.info(_LI("Image %s has been scrubbed successfully"), image_id) else: LOG.warn(_LW("One or more image locations couldn't be scrubbed " "from backend. 
Leaving image '%s' in 'pending_delete'" " status") % image_id) def _delete_image_location_from_backend(self, image_id, loc_id, uri): if CONF.metadata_encryption_key: uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) try: LOG.debug("Scrubbing image %s from a location.", image_id) try: self.store_api.delete_from_backend(uri, self.admin_context) except store_exceptions.NotFound: LOG.info(_LI("Image location for image '%s' not found in " "backend; Marking image location deleted in " "db."), image_id) if loc_id != '-': db_api.get_api().image_location_delete(self.admin_context, image_id, int(loc_id), 'deleted') LOG.info(_LI("Image %s is scrubbed from a location."), image_id) except Exception as e: LOG.error(_LE("Unable to scrub image %(id)s from a location. " "Reason: %(exc)s ") % {'id': image_id, 'exc': encodeutils.exception_to_unicode(e)}) raise glance-12.0.0/glance/common/0000775000567000056710000000000012701407204016702 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/glare/0000775000567000056710000000000012701407204017774 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/glare/__init__.py0000664000567000056710000000000012701407047022100 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/glare/definitions.py0000664000567000056710000005243212701407047022674 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import numbers import re import semantic_version import six import glance.common.exception as exc from glance.common.glare import declarative from glance.i18n import _ class Text(declarative.PropertyDefinition): """A text metadata property of arbitrary length Maps to TEXT columns in database, does not support sorting or filtering """ ALLOWED_TYPES = (six.string_types,) DB_TYPE = 'text' # noinspection PyAttributeOutsideInit class String(Text): """A string metadata property of limited length Maps to VARCHAR columns in database, supports filtering and sorting. May have constrains on length and regexp patterns. The maximum length is limited to 255 characters """ DB_TYPE = 'string' def __init__(self, max_length=255, min_length=0, pattern=None, **kwargs): """Defines a String metadata property. :param max_length: maximum value length :param min_length: minimum value length :param pattern: regexp pattern to match """ super(String, self).__init__(**kwargs) self.max_length(max_length) self.min_length(min_length) if pattern: self.pattern(pattern) # if default and/or allowed_values are specified (in base classes) # then we need to validate them against the newly added validators self._check_definition() def max_length(self, value): """Sets the maximum value length""" self._max_length = value if value is not None: if value > 255: raise exc.InvalidArtifactTypePropertyDefinition( _('Max string length may not exceed 255 characters')) self._add_validator('max_length', lambda v: len(v) <= self._max_length, _('Length is greater than maximum')) else: self._remove_validator('max_length') self._check_definition() def min_length(self, value): """Sets the minimum value length""" self._min_length = value if value is not None: if value < 0: raise exc.InvalidArtifactTypePropertyDefinition( _('Min string length may not be negative')) self._add_validator('min_length', lambda v: len(v) >= self._min_length, _('Length is less than minimum')) else: 
self._remove_validator('min_length') self._check_definition() def pattern(self, value): """Sets the regexp pattern to match""" self._pattern = value if value is not None: self._add_validator('pattern', lambda v: re.match(self._pattern, v) is not None, _('Does not match pattern')) else: self._remove_validator('pattern') self._check_definition() class SemVerString(String): """A String metadata property matching semver pattern""" def __init__(self, **kwargs): def validate(value): try: semantic_version.Version(value, partial=True) except ValueError: return False return True super(SemVerString, self).__init__(validators=[(validate, "Invalid semver string")], **kwargs) # noinspection PyAttributeOutsideInit class Integer(declarative.PropertyDefinition): """An Integer metadata property Maps to INT columns in Database, supports filtering and sorting. May have constraints on value """ ALLOWED_TYPES = (six.integer_types,) DB_TYPE = 'int' def __init__(self, min_value=None, max_value=None, **kwargs): """Defines an Integer metadata property :param min_value: minimum allowed value :param max_value: maximum allowed value """ super(Integer, self).__init__(**kwargs) if min_value is not None: self.min_value(min_value) if max_value is not None: self.max_value(max_value) # if default and/or allowed_values are specified (in base classes) # then we need to validate them against the newly added validators self._check_definition() def min_value(self, value): """Sets the minimum allowed value""" self._min_value = value if value is not None: self._add_validator('min_value', lambda v: v >= self._min_value, _('Value is less than minimum')) else: self._remove_validator('min_value') self._check_definition() def max_value(self, value): """Sets the maximum allowed value""" self._max_value = value if value is not None: self._add_validator('max_value', lambda v: v <= self._max_value, _('Value is greater than maximum')) else: self._remove_validator('max_value') self._check_definition() # noinspection 
PyAttributeOutsideInit class DateTime(declarative.PropertyDefinition): """A DateTime metadata property Maps to a DATETIME columns in database. Is not supported as Type Specific property, may be used only as Generic one May have constraints on value """ ALLOWED_TYPES = (datetime.datetime,) DB_TYPE = 'datetime' def __init__(self, min_value=None, max_value=None, **kwargs): """Defines a DateTime metadata property :param min_value: minimum allowed value :param max_value: maximum allowed value """ super(DateTime, self).__init__(**kwargs) if min_value is not None: self.min_value(min_value) if max_value is not None: self.max_value(max_value) # if default and/or allowed_values are specified (in base classes) # then we need to validate them against the newly added validators self._check_definition() def min_value(self, value): """Sets the minimum allowed value""" self._min_value = value if value is not None: self._add_validator('min_value', lambda v: v >= self._min_value, _('Value is less than minimum')) else: self._remove_validator('min_value') self._check_definition() def max_value(self, value): """Sets the maximum allowed value""" self._max_value = value if value is not None: self._add_validator('max_value', lambda v: v <= self._max_value, _('Value is greater than maximum')) else: self._remove_validator('max_value') self._check_definition() # noinspection PyAttributeOutsideInit class Numeric(declarative.PropertyDefinition): """A Numeric metadata property Maps to floating point number columns in Database, supports filtering and sorting. 
May have constraints on value """ ALLOWED_TYPES = numbers.Number DB_TYPE = 'numeric' def __init__(self, min_value=None, max_value=None, **kwargs): """Defines a Numeric metadata property :param min_value: minimum allowed value :param max_value: maximum allowed value """ super(Numeric, self).__init__(**kwargs) if min_value is not None: self.min_value(min_value) if max_value is not None: self.max_value(max_value) # if default and/or allowed_values are specified (in base classes) # then we need to validate them against the newly added validators self._check_definition() def min_value(self, value): """Sets the minimum allowed value""" self._min_value = value if value is not None: self._add_validator('min_value', lambda v: v >= self._min_value, _('Value is less than minimum')) else: self._remove_validator('min_value') self._check_definition() def max_value(self, value): """Sets the maximum allowed value""" self._max_value = value if value is not None: self._add_validator('max_value', lambda v: v <= self._max_value, _('Value is greater than maximum')) else: self._remove_validator('max_value') self._check_definition() class Boolean(declarative.PropertyDefinition): """A Boolean metadata property Maps to Boolean columns in database. Supports filtering and sorting. """ ALLOWED_TYPES = (bool,) DB_TYPE = 'bool' class Array(declarative.ListAttributeDefinition, declarative.PropertyDefinition, list): """An array metadata property May contain elements of any other PropertyDefinition types except Dict and Array. Each elements maps to appropriate type of columns in database. Preserves order. Allows filtering based on "Array contains Value" semantics May specify constrains on types of elements, their amount and uniqueness. """ ALLOWED_ITEM_TYPES = (declarative.PropertyDefinition,) def __init__(self, item_type=String(), min_size=0, max_size=None, unique=False, extra_items=True, **kwargs): """Defines an Array metadata property :param item_type: defines the types of elements in Array. 
If set to an instance of PropertyDefinition then all the elements have to be of that type. If set to list of such instances, then the elements on the corresponding positions have to be of the appropriate type. :param min_size: minimum size of the Array :param max_size: maximum size of the Array :param unique: if set to true, all the elements in the Array have to be unique """ if isinstance(item_type, Array): msg = _("Array property can't have item_type=Array") raise exc.InvalidArtifactTypePropertyDefinition(msg) declarative.ListAttributeDefinition.__init__(self, item_type=item_type, min_size=min_size, max_size=max_size, unique=unique) declarative.PropertyDefinition.__init__(self, **kwargs) class Dict(declarative.DictAttributeDefinition, declarative.PropertyDefinition, dict): """A dictionary metadata property May contain elements of any other PropertyDefinition types except Dict. Each elements maps to appropriate type of columns in database. Allows filtering and sorting by values of each key except the ones mapping the Text fields. May specify constrains on types of elements and their amount. """ ALLOWED_PROPERTY_TYPES = (declarative.PropertyDefinition,) def __init__(self, properties=String(), min_properties=0, max_properties=None, **kwargs): """Defines a dictionary metadata property :param properties: defines the types of dictionary values. If set to an instance of PropertyDefinition then all the value have to be of that type. If set to a dictionary with string keys and values of PropertyDefinition type, then the elements mapped by the corresponding have have to be of the appropriate type. 
:param min_properties: minimum allowed amount of properties in the dict :param max_properties: maximum allowed amount of properties in the dict """ declarative.DictAttributeDefinition.__init__( self, properties=properties, min_properties=min_properties, max_properties=max_properties) declarative.PropertyDefinition.__init__(self, **kwargs) class ArtifactType(declarative.get_declarative_base()): # noqa """A base class for all the Artifact Type definitions Defines the Generic metadata properties as attributes. """ id = String(required=True, readonly=True) type_name = String(required=True, readonly=True) type_version = SemVerString(required=True, readonly=True) name = String(required=True, mutable=False) version = SemVerString(required=True, mutable=False) description = Text() tags = Array(unique=True, default=[]) visibility = String(required=True, allowed_values=["private", "public", "shared", "community"], default="private") state = String(required=True, readonly=True, allowed_values=["creating", "active", "deactivated", "deleted"]) owner = String(required=True, readonly=True) created_at = DateTime(required=True, readonly=True) updated_at = DateTime(required=True, readonly=True) published_at = DateTime(readonly=True) deleted_at = DateTime(readonly=True) def __init__(self, **kwargs): if "type_name" in kwargs: raise exc.InvalidArtifactPropertyValue( _("Unable to specify artifact type explicitly")) if "type_version" in kwargs: raise exc.InvalidArtifactPropertyValue( _("Unable to specify artifact type version explicitly")) super(ArtifactType, self).__init__(type_name=self.metadata.type_name, type_version=self.metadata.type_version, **kwargs) def __eq__(self, other): if not isinstance(other, ArtifactType): return False return self.id == other.id def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.id) def __is_mutable__(self): return self.state == "creating" class ArtifactReference(declarative.RelationDefinition): """An artifact 
reference definition Allows to define constraints by the name and version of target artifact """ ALLOWED_TYPES = ArtifactType def __init__(self, type_name=None, type_version=None, **kwargs): """Defines an artifact reference :param type_name: type name of the target artifact :param type_version: type version of the target artifact """ super(ArtifactReference, self).__init__(**kwargs) if type_name is not None: if isinstance(type_name, list): type_names = list(type_name) if type_version is not None: raise exc.InvalidArtifactTypePropertyDefinition( _('Unable to specify version ' 'if multiple types are possible')) else: type_names = [type_name] def validate_reference(artifact): if artifact.type_name not in type_names: return False if (type_version is not None and artifact.type_version != type_version): return False return True self._add_validator('referenced_type', validate_reference, _("Invalid referenced type")) elif type_version is not None: raise exc.InvalidArtifactTypePropertyDefinition( _('Unable to specify version ' 'if type is not specified')) self._check_definition() class ArtifactReferenceList(declarative.ListAttributeDefinition, declarative.RelationDefinition, list): """A list of Artifact References Allows to define a collection of references to other artifacts, each optionally constrained by type name and type version """ ALLOWED_ITEM_TYPES = (ArtifactReference,) def __init__(self, references=ArtifactReference(), min_size=0, max_size=None, **kwargs): if isinstance(references, list): raise exc.InvalidArtifactTypePropertyDefinition( _("Invalid reference list specification")) declarative.RelationDefinition.__init__(self, **kwargs) declarative.ListAttributeDefinition.__init__(self, item_type=references, min_size=min_size, max_size=max_size, unique=True, default=[] if min_size == 0 else None) class Blob(object): """A Binary object being part of the Artifact""" def __init__(self, size=0, locations=None, checksum=None, item_key=None): """Initializes a new Binary 
Object for an Artifact :param size: the size of Binary Data :param locations: a list of data locations in backing stores :param checksum: a checksum for the data """ if locations is None: locations = [] self.size = size self.checksum = checksum self.locations = locations self.item_key = item_key def to_dict(self): return { "size": self.size, "checksum": self.checksum, } class BinaryObject(declarative.BlobDefinition, Blob): """A definition of BinaryObject binding Adds a BinaryObject to an Artifact Type, optionally constrained by file size and amount of locations """ ALLOWED_TYPES = (Blob,) def __init__(self, max_file_size=None, min_file_size=None, min_locations=None, max_locations=None, **kwargs): """Defines a binary object as part of Artifact Type :param max_file_size: maximum size of the associate Blob :param min_file_size: minimum size of the associated Blob :param min_locations: minimum number of locations in the associated Blob :param max_locations: maximum number of locations in the associated Blob """ mutable = kwargs.pop('mutable', False) if mutable: raise exc.InvalidArtifactTypePropertyDefinition( _("BinaryObject property cannot be declared mutable")) super(BinaryObject, self).__init__(default=None, readonly=False, mutable=mutable, **kwargs) self._max_file_size = max_file_size self._min_file_size = min_file_size self._min_locations = min_locations self._max_locations = max_locations self._add_validator('size_not_empty', lambda v: v.size is not None, _('Blob size is not set')) if max_file_size: self._add_validator('max_size', lambda v: v.size <= self._max_file_size, _("File too large")) if min_file_size: self._add_validator('min_size', lambda v: v.size >= self._min_file_size, _("File too small")) if min_locations: self._add_validator('min_locations', lambda v: len( v.locations) >= self._min_locations, _("Too few locations")) if max_locations: self._add_validator( 'max_locations', lambda v: len(v.locations) <= self._max_locations, _("Too many locations")) 
class BinaryObjectList(declarative.ListAttributeDefinition, declarative.BlobDefinition, list): """A definition of binding to the list of BinaryObject Adds a list of BinaryObject's to an artifact type, optionally constrained by the number of objects in the list and their uniqueness """ ALLOWED_ITEM_TYPES = (BinaryObject,) def __init__(self, objects=BinaryObject(), min_count=0, max_count=None, **kwargs): declarative.BlobDefinition.__init__(self, **kwargs) declarative.ListAttributeDefinition.__init__(self, item_type=objects, min_size=min_count, max_size=max_count, unique=True) self.default = [] if min_count == 0 else None glance-12.0.0/glance/common/glare/declarative.py0000664000567000056710000006752712701407047022657 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import re import semantic_version import six from glance.common import exception as exc from glance.i18n import _ class AttributeDefinition(object): """A base class for the attribute definitions which may be added to declaratively defined artifact types """ ALLOWED_TYPES = (object,) def __init__(self, display_name=None, description=None, readonly=False, mutable=True, required=False, default=None): """Initializes attribute definition :param display_name: Display name of the attribute :param description: Description of the attribute :param readonly: Flag indicating if the value of attribute may not be changed once an artifact is created :param mutable: Flag indicating if the value of attribute may not be changed once an artifact is published :param required: Flag indicating if the value of attribute is required :param default: default value of the attribute """ self.name = None self.display_name = display_name self.description = description self.readonly = readonly self.required = required self.mutable = mutable self.default = default self._add_validator('type', lambda v: isinstance(v, self.ALLOWED_TYPES), _("Not a valid value type")) self._validate_default() def _set_name(self, value): self.name = value if self.display_name is None: self.display_name = value def _add_validator(self, name, func, message): if not hasattr(self, '_validators'): self._validators = [] self._validators_index = {} pair = (func, message) self._validators.append(pair) self._validators_index[name] = pair def _get_validator(self, name): return self._validators_index.get(name) def _remove_validator(self, name): pair = self._validators_index.pop(name, None) if pair is not None: self._validators.remove(pair) def _check_definition(self): self._validate_default() def _validate_default(self): if self.default: try: self.validate(self.default, 'default') except exc.InvalidArtifactPropertyValue: raise exc.InvalidArtifactTypePropertyDefinition( _("Default value is invalid")) def 
get_value(self, obj): return getattr(obj, self.name) def set_value(self, obj, value): return setattr(obj, self.name, value) def validate(self, value, name=None): if value is None: if self.required: raise exc.InvalidArtifactPropertyValue( name=name or self.name, val=value, msg=_('Value is required')) else: return first_error = next((msg for v_func, msg in self._validators if not v_func(value)), None) if first_error: raise exc.InvalidArtifactPropertyValue(name=name or self.name, val=value, msg=first_error) class ListAttributeDefinition(AttributeDefinition): """A base class for Attribute definitions having List-semantics Is inherited by Array, ArtifactReferenceList and BinaryObjectList """ ALLOWED_TYPES = (list,) ALLOWED_ITEM_TYPES = (AttributeDefinition, ) def _check_item_type(self, item): if not isinstance(item, self.ALLOWED_ITEM_TYPES): raise exc.InvalidArtifactTypePropertyDefinition( _('Invalid item type specification')) if item.default is not None: raise exc.InvalidArtifactTypePropertyDefinition( _('List definitions may hot have defaults')) def __init__(self, item_type, min_size=0, max_size=None, unique=False, **kwargs): super(ListAttributeDefinition, self).__init__(**kwargs) if isinstance(item_type, list): for it in item_type: self._check_item_type(it) # we need to copy the item_type collection self.item_type = item_type[:] if min_size != 0: raise exc.InvalidArtifactTypePropertyDefinition( _("Cannot specify 'min_size' explicitly") ) if max_size is not None: raise exc.InvalidArtifactTypePropertyDefinition( _("Cannot specify 'max_size' explicitly") ) # setting max_size and min_size to the length of item_type, # as tuple-semantic assumes that the number of elements is set # by the type spec min_size = max_size = len(item_type) else: self._check_item_type(item_type) self.item_type = item_type if min_size: self.min_size(min_size) if max_size: self.max_size(max_size) if unique: self.unique() def min_size(self, value): self._min_size = value if value is not None: 
self._add_validator('min_size', lambda v: len(v) >= self._min_size, _('List size is less than minimum')) else: self._remove_validator('min_size') def max_size(self, value): self._max_size = value if value is not None: self._add_validator('max_size', lambda v: len(v) <= self._max_size, _('List size is greater than maximum')) else: self._remove_validator('max_size') def unique(self, value=True): self._unique = value if value: def _unique(items): seen = set() for item in items: if item in seen: return False seen.add(item) return True self._add_validator('unique', _unique, _('Items have to be unique')) else: self._remove_validator('unique') def _set_name(self, value): super(ListAttributeDefinition, self)._set_name(value) if isinstance(self.item_type, list): for i, item in enumerate(self.item_type): item._set_name("%s[%i]" % (value, i)) else: self.item_type._set_name("%s[*]" % value) def validate(self, value, name=None): super(ListAttributeDefinition, self).validate(value, name) if value is not None: for i, item in enumerate(value): self._validate_item_at(item, i) def get_item_definition_at_index(self, index): if isinstance(self.item_type, list): if index < len(self.item_type): return self.item_type[index] else: return None return self.item_type def _validate_item_at(self, item, index): item_type = self.get_item_definition_at_index(index) # set name if none has been given to the list element at given index if (isinstance(self.item_type, list) and item_type and not item_type.name): item_type.name = "%s[%i]" % (self.name, index) if item_type: item_type.validate(item) class DictAttributeDefinition(AttributeDefinition): """A base class for Attribute definitions having Map-semantics Is inherited by Dict """ ALLOWED_TYPES = (dict,) ALLOWED_PROPERTY_TYPES = (AttributeDefinition,) def _check_prop(self, key, item): if (not isinstance(item, self.ALLOWED_PROPERTY_TYPES) or (key is not None and not isinstance(key, six.string_types))): raise 
exc.InvalidArtifactTypePropertyDefinition( _('Invalid dict property type specification')) @staticmethod def _validate_key(key): if not isinstance(key, six.string_types): raise exc.InvalidArtifactPropertyValue( _('Invalid dict property type')) def __init__(self, properties, min_properties=0, max_properties=0, **kwargs): super(DictAttributeDefinition, self).__init__(**kwargs) if isinstance(properties, dict): for key, value in six.iteritems(properties): self._check_prop(key, value) # copy the properties dict self.properties = properties.copy() self._add_validator('keys', lambda v: set(v.keys()) <= set( self.properties.keys()), _('Dictionary contains unexpected key(s)')) else: self._check_prop(None, properties) self.properties = properties if min_properties: self.min_properties(min_properties) if max_properties: self.max_properties(max_properties) def min_properties(self, value): self._min_properties = value if value is not None: self._add_validator('min_properties', lambda v: len(v) >= self._min_properties, _('Dictionary size is less than ' 'minimum')) else: self._remove_validator('min_properties') def max_properties(self, value): self._max_properties = value if value is not None: self._add_validator('max_properties', lambda v: len(v) <= self._max_properties, _('Dictionary size is ' 'greater than maximum')) else: self._remove_validator('max_properties') def _set_name(self, value): super(DictAttributeDefinition, self)._set_name(value) if isinstance(self.properties, dict): for k, v in six.iteritems(self.properties): v._set_name(value) else: self.properties._set_name(value) def validate(self, value, name=None): super(DictAttributeDefinition, self).validate(value, name) if value is not None: for k, v in six.iteritems(value): self._validate_item_with_key(v, k) def _validate_item_with_key(self, value, key): self._validate_key(key) if isinstance(self.properties, dict): prop_def = self.properties.get(key) if prop_def is not None: name = "%s[%s]" % (prop_def.name, key) 
prop_def.validate(value, name=name) else: name = "%s[%s]" % (self.properties.name, key) self.properties.validate(value, name=name) def get_prop_definition_at_key(self, key): if isinstance(self.properties, dict): return self.properties.get(key) else: return self.properties class PropertyDefinition(AttributeDefinition): """A base class for Attributes defining generic or type-specific metadata properties """ DB_TYPE = None def __init__(self, internal=False, allowed_values=None, validators=None, **kwargs): """Defines a metadata property :param internal: a flag indicating that the property is internal, i.e. not returned to client :param allowed_values: specifies a list of values allowed for the property :param validators: specifies a list of custom validators for the property """ super(PropertyDefinition, self).__init__(**kwargs) self.internal = internal self._allowed_values = None if validators is not None: try: for i, (f, m) in enumerate(validators): self._add_validator("custom_%i" % i, f, m) except ValueError: raise exc.InvalidArtifactTypePropertyDefinition( _("Custom validators list should contain tuples " "'(function, message)'")) if allowed_values is not None: # copy the allowed_values, as this is going to create a # closure, and we need to make sure that external modification of # this list does not affect the created validator self.allowed_values(allowed_values) self._check_definition() def _validate_allowed_values(self): if self._allowed_values: try: for allowed_value in self._allowed_values: self.validate(allowed_value, 'allowed_value') except exc.InvalidArtifactPropertyValue: raise exc.InvalidArtifactTypePropertyDefinition( _("Allowed values %s are invalid under given validators") % self._allowed_values) def allowed_values(self, values): self._allowed_values = values[:] if values is not None: self._add_validator('allowed', lambda v: v in self._allowed_values, _("Is not allowed value")) else: self._remove_validator('allowed') self._check_definition() def 
_check_definition(self): self._validate_allowed_values() super(PropertyDefinition, self)._check_definition() class RelationDefinition(AttributeDefinition): """A base class for Attributes defining cross-artifact relations""" def __init__(self, internal=False, **kwargs): self.internal = internal kwargs.setdefault('mutable', False) # if mutable=True has been passed -> raise an exception if kwargs['mutable'] is True: raise exc.InvalidArtifactTypePropertyDefinition( _("Dependency relations cannot be mutable")) super(RelationDefinition, self).__init__(**kwargs) class BlobDefinition(AttributeDefinition): """A base class for Attributes defining binary objects""" pass class ArtifactTypeMetaclass(type): """A metaclass to build Artifact Types. Not intended to be used directly Use `get_declarative_base` to get the base class instead """ def __init__(cls, class_name, bases, attributes): if '_declarative_artifact_type' not in cls.__dict__: _build_declarative_meta(cls) super(ArtifactTypeMetaclass, cls).__init__(class_name, bases, attributes) class ArtifactPropertyDescriptor(object): """A descriptor object for working with artifact attributes""" def __init__(self, prop, collection_wrapper_class=None): self.prop = prop self.collection_wrapper_class = collection_wrapper_class def __get__(self, instance, owner): if instance is None: # accessed via owner class return self.prop else: v = getattr(instance, '_' + self.prop.name, None) if v is None and self.prop.default is not None: v = copy.copy(self.prop.default) self.__set__(instance, v, ignore_mutability=True) return self.__get__(instance, owner) else: if v is not None and self.collection_wrapper_class: if self.prop.readonly: readonly = True elif (not self.prop.mutable and hasattr(instance, '__is_mutable__') and not hasattr(instance, '__suspend_mutability_checks__')): readonly = not instance.__is_mutable__() else: readonly = False if readonly: v = v.__make_immutable__() return v def __set__(self, instance, value, 
ignore_mutability=False): if instance: if self.prop.readonly: if hasattr(instance, '_' + self.prop.name): raise exc.InvalidArtifactPropertyValue( _('Attempt to set readonly property')) if not self.prop.mutable: if (hasattr(instance, '__is_mutable__') and not hasattr(instance, '__suspend_mutability_checks__')): mutable = instance.__is_mutable__() or ignore_mutability if not mutable: raise exc.InvalidArtifactPropertyValue( _('Attempt to set value of immutable property')) if value is not None and self.collection_wrapper_class: value = self.collection_wrapper_class(value) value.property = self.prop self.prop.validate(value) setattr(instance, '_' + self.prop.name, value) class ArtifactAttributes(object): """A container class storing description of Artifact Type attributes""" def __init__(self): self.properties = {} self.dependencies = {} self.blobs = {} self.all = {} @property def default_dependency(self): """Returns the default dependency relation for an artifact type""" if len(self.dependencies) == 1: return list(self.dependencies.values())[0] @property def default_blob(self): """Returns the default blob object for an artifact type""" if len(self.blobs) == 1: return list(self.blobs.values())[0] @property def default_properties_dict(self): """Returns a default properties dict for an artifact type""" dict_props = [v for v in self.properties.values() if isinstance(v, DictAttributeDefinition)] if len(dict_props) == 1: return dict_props[0] @property def tags(self): """Returns tags property for an artifact type""" return self.properties.get('tags') def add(self, attribute): self.all[attribute.name] = attribute if isinstance(attribute, PropertyDefinition): self.properties[attribute.name] = attribute elif isinstance(attribute, BlobDefinition): self.blobs[attribute.name] = attribute elif isinstance(attribute, RelationDefinition): self.dependencies[attribute.name] = attribute class ArtifactTypeMetadata(object): """A container to store the meta-information about an artifact 
type""" def __init__(self, type_name, type_display_name, type_version, type_description, endpoint): """Initializes the Artifact Type metadata :param type_name: name of the artifact type :param type_display_name: display name of the artifact type :param type_version: version of the artifact type :param type_description: description of the artifact type :param endpoint: REST API URI suffix to call the artifacts of this type """ self.attributes = ArtifactAttributes() # These are going to be defined by third-party plugin # developers, so we need to do some validations on these values and # raise InvalidArtifactTypeDefinition if they are violated self.type_name = type_name self.type_display_name = type_display_name or type_name self.type_version = type_version or '1.0' self.type_description = type_description self.endpoint = endpoint or type_name.lower() self._validate_string(self.type_name, 'Type name', min_length=1, max_length=255) self._validate_string(self.type_display_name, 'Type display name', max_length=255) self._validate_string(self.type_description, 'Type description') self._validate_string(self.endpoint, 'endpoint', min_length=1) try: semantic_version.Version(self.type_version, partial=True) except ValueError: raise exc.InvalidArtifactTypeDefinition( message=_("Type version has to be a valid semver string")) @staticmethod def _validate_string(value, name, min_length=0, max_length=None, pattern=None): if value is None: if min_length > 0: raise exc.InvalidArtifactTypeDefinition( message=_("%(attribute)s is required"), attribute=name) else: return if not isinstance(value, six.string_types): raise exc.InvalidArtifactTypeDefinition( message=_("%(attribute)s have to be string"), attribute=name) if max_length and len(value) > max_length: raise exc.InvalidArtifactTypeDefinition( message=_("%(attribute)s may not be longer than %(length)i"), attribute=name, length=max_length) if min_length and len(value) < min_length: raise exc.InvalidArtifactTypeDefinition( 
message=_("%(attribute)s may not be shorter than %(length)i"), attribute=name, length=min_length) if pattern and not re.match(pattern, value): raise exc.InvalidArtifactTypeDefinition( message=_("%(attribute)s should match pattern %(pattern)s"), attribute=name, pattern=pattern.pattern) def _build_declarative_meta(cls): attrs = dict(cls.__dict__) type_name = None type_display_name = None type_version = None type_description = None endpoint = None for base in cls.__mro__: for name, value in six.iteritems(vars(base)): if name == '__type_name__': if not type_name: type_name = cls.__type_name__ elif name == '__type_version__': if not type_version: type_version = cls.__type_version__ elif name == '__type_description__': if not type_description: type_description = cls.__type_description__ elif name == '__endpoint__': if not endpoint: endpoint = cls.__endpoint__ elif name == '__type_display_name__': if not type_display_name: type_display_name = cls.__type_display_name__ elif base is not cls and name not in attrs: if isinstance(value, AttributeDefinition): attrs[name] = value elif isinstance(value, ArtifactPropertyDescriptor): attrs[name] = value.prop meta = ArtifactTypeMetadata(type_name=type_name or cls.__name__, type_display_name=type_display_name, type_version=type_version, type_description=type_description, endpoint=endpoint) setattr(cls, 'metadata', meta) for k, v in attrs.items(): if k == 'metadata': raise exc.InvalidArtifactTypePropertyDefinition( _("Cannot declare artifact property with reserved name " "'metadata'")) if isinstance(v, AttributeDefinition): v._set_name(k) wrapper_class = None if isinstance(v, ListAttributeDefinition): wrapper_class = type("ValidatedList", (list,), {}) _add_validation_to_list(wrapper_class) if isinstance(v, DictAttributeDefinition): wrapper_class = type("ValidatedDict", (dict,), {}) _add_validation_to_dict(wrapper_class) prop_descr = ArtifactPropertyDescriptor(v, wrapper_class) setattr(cls, k, prop_descr) meta.attributes.add(v) def 
_validating_method(method, klass): def wrapper(self, *args, **kwargs): instance_copy = klass(self) method(instance_copy, *args, **kwargs) self.property.validate(instance_copy) method(self, *args, **kwargs) return wrapper def _immutable_method(method): def substitution(*args, **kwargs): raise exc.InvalidArtifactPropertyValue( _("Unable to modify collection in " "immutable or readonly property")) return substitution def _add_immutable_wrappers(class_to_add, wrapped_methods): for method_name in wrapped_methods: method = getattr(class_to_add, method_name, None) if method: setattr(class_to_add, method_name, _immutable_method(method)) def _add_validation_wrappers(class_to_validate, base_class, validated_methods): for method_name in validated_methods: method = getattr(class_to_validate, method_name, None) if method: setattr(class_to_validate, method_name, _validating_method(method, base_class)) readonly_class = type("Readonly" + class_to_validate.__name__, (class_to_validate,), {}) _add_immutable_wrappers(readonly_class, validated_methods) def __make_immutable__(self): return readonly_class(self) class_to_validate.__make_immutable__ = __make_immutable__ def _add_validation_to_list(list_based_class): validated_methods = ['append', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort', '__setitem__', '__delitem__', '__delslice__'] _add_validation_wrappers(list_based_class, list, validated_methods) def _add_validation_to_dict(dict_based_class): validated_methods = ['pop', 'popitem', 'setdefault', 'update', '__delitem__', '__setitem__', 'clear'] _add_validation_wrappers(dict_based_class, dict, validated_methods) def _kwarg_init_constructor(self, **kwargs): self.__suspend_mutability_checks__ = True try: for k in kwargs: if not hasattr(type(self), k): raise exc.ArtifactInvalidProperty(prop=k) setattr(self, k, kwargs[k]) self._validate_required(self.metadata.attributes.properties) finally: del self.__suspend_mutability_checks__ def _validate_required(self, attribute_dict): for 
k, v in six.iteritems(attribute_dict): if v.required and (not hasattr(self, k) or getattr(self, k) is None): raise exc.InvalidArtifactPropertyValue(name=k, val=None, msg=_('Value is required')) def _update(self, values): for k in values: if hasattr(type(self), k): setattr(self, k, values[k]) else: raise exc.ArtifactInvalidProperty(prop=k) def _pre_publish_validator(self, *args, **kwargs): self._validate_required(self.metadata.attributes.blobs) self._validate_required(self.metadata.attributes.dependencies) _kwarg_init_constructor.__name__ = '__init__' _pre_publish_validator.__name__ = '__pre_publish__' _update.__name__ = 'update' def get_declarative_base(name='base', base_class=object): """Returns a base class which should be inherited to construct Artifact Type object using the declarative syntax of attribute definition """ bases = not isinstance(base_class, tuple) and (base_class,) or base_class class_dict = {'__init__': _kwarg_init_constructor, '_validate_required': _validate_required, '__pre_publish__': _pre_publish_validator, '_declarative_artifact_type': True, 'update': _update} return ArtifactTypeMetaclass(name, bases, class_dict) glance-12.0.0/glance/common/glare/loader.py0000664000567000056710000001676512701407047021640 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from oslo_config import cfg import semantic_version from stevedore import enabled from glance.common import exception from glance.common.glare import definitions from glance.i18n import _, _LE, _LI, _LW from oslo_log import log as logging LOG = logging.getLogger(__name__) plugins_opts = [ cfg.BoolOpt('load_enabled', default=True, help=_('When false, no artifacts can be loaded regardless of' ' available_plugins. When true, artifacts can be' ' loaded.')), cfg.ListOpt('available_plugins', default=[], help=_('A list of artifacts that are allowed in the' ' format name or name-version. Empty list means that' ' any artifact can be loaded.')) ] CONF = cfg.CONF CONF.register_opts(plugins_opts) class ArtifactsPluginLoader(object): def __init__(self, namespace): self.mgr = enabled.EnabledExtensionManager( check_func=self._gen_check_func(), namespace=namespace, propagate_map_exceptions=True, on_load_failure_callback=self._on_load_failure) self.plugin_map = {'by_typename': {}, 'by_endpoint': {}} def _add_extension(ext): """ Plugins can be loaded as entry_point=single plugin and entry_point=PLUGIN_LIST, where PLUGIN_LIST is a python variable holding a list of plugins """ def _load_one(plugin): if issubclass(plugin, definitions.ArtifactType): # make sure that have correct plugin name art_name = plugin.metadata.type_name if art_name != ext.name: raise exception.ArtifactNonMatchingTypeName( name=art_name, plugin=ext.name) # make sure that no plugin with the same name and version # already exists exists = self._get_plugins(ext.name) new_tv = plugin.metadata.type_version if any(e.metadata.type_version == new_tv for e in exists): raise exception.ArtifactDuplicateNameTypeVersion() self._add_plugin("by_endpoint", plugin.metadata.endpoint, plugin) self._add_plugin("by_typename", plugin.metadata.type_name, plugin) if isinstance(ext.plugin, list): for p in ext.plugin: _load_one(p) else: _load_one(ext.plugin) # (ivasilevskaya) that looks pretty bad as RuntimeError is too 
general, # but stevedore has awful exception wrapping with no specific class # for this very case (no extensions for given namespace found) try: self.mgr.map(_add_extension) except RuntimeError as re: LOG.error(_LE("Unable to load artifacts: %s") % re.message) def _version(self, artifact): return semantic_version.Version.coerce(artifact.metadata.type_version) def _add_plugin(self, spec, name, plugin): """ Inserts a new plugin into a sorted by desc type_version list of existing plugins in order to retrieve the latest by next() """ def _add(name, value): self.plugin_map[spec][name] = value old_order = copy.copy(self._get_plugins(name, spec=spec)) for i, p in enumerate(old_order): if self._version(p) < self._version(plugin): _add(name, old_order[0:i] + [plugin] + old_order[i:]) return _add(name, old_order + [plugin]) def _get_plugins(self, name, spec="by_typename"): if spec not in self.plugin_map.keys(): return [] return self.plugin_map[spec].get(name, []) def _gen_check_func(self): """generates check_func for EnabledExtensionManager""" def _all_forbidden(ext): LOG.warn(_LW("Can't load artifact %s: load disabled in config") % ext.name) raise exception.ArtifactLoadError(name=ext.name) def _all_allowed(ext): LOG.info( _LI("Artifact %s has been successfully loaded"), ext.name) return True if not CONF.load_enabled: return _all_forbidden if len(CONF.available_plugins) == 0: return _all_allowed available = [] for name in CONF.available_plugins: type_name, version = (name.split('-', 1) if '-' in name else (name, None)) available.append((type_name, version)) def _check_ext(ext): try: next(n for n, v in available if n == ext.plugin.metadata.type_name and (v is None or v == ext.plugin.metadata.type_version)) except StopIteration: LOG.warn(_LW("Can't load artifact %s: not in" " available_plugins list") % ext.name) raise exception.ArtifactLoadError(name=ext.name) LOG.info( _LI("Artifact %s has been successfully loaded"), ext.name) return True return _check_ext # this has to be 
done explicitly as stevedore is pretty ignorant when # face to face with an Exception and tries to swallow it and print sth # irrelevant instead of expected error message def _on_load_failure(self, manager, ep, exc): msg = (_LE("Could not load plugin from %(module)s") % {"module": ep.module_name}) LOG.exception(msg) raise exc def _find_class_in_collection(self, collection, name, version=None): try: def _cmp_version(plugin, version): ver = semantic_version.Version.coerce return (ver(plugin.metadata.type_version) == ver(version)) if version: return next((p for p in collection if _cmp_version(p, version))) return next((p for p in collection)) except StopIteration: raise exception.ArtifactPluginNotFound( name="%s %s" % (name, "v %s" % version if version else "")) def get_class_by_endpoint(self, name, version=None): if version is None: classlist = self._get_plugins(name, spec="by_endpoint") if not classlist: raise exception.ArtifactPluginNotFound(name=name) return self._find_class_in_collection(classlist, name) return self._find_class_in_collection( self._get_plugins(name, spec="by_endpoint"), name, version) def get_class_by_typename(self, name, version=None): return self._find_class_in_collection( self._get_plugins(name, spec="by_typename"), name, version) glance-12.0.0/glance/common/glare/serialization.py0000664000567000056710000003174612701407047023243 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections

import six

from glance.common import exception
from glance.common.glare import declarative
from glance.common.glare import definitions
from glance import glare as ga
from glance.i18n import _

# Properties shared by every artifact type; stored as first-class DB columns
# rather than in the generic 'properties' bag.
COMMON_ARTIFACT_PROPERTIES = ['id',
                              'type_name',
                              'type_version',
                              'name',
                              'version',
                              'description',
                              'visibility',
                              'state',
                              'tags',
                              'owner',
                              'created_at',
                              'updated_at',
                              'published_at',
                              'deleted_at']


def _serialize_list_prop(prop, values):
    """
    A helper func called to correctly serialize an Array property.

    Returns a dict {'type': some_supported_db_type, 'value': serialized_data}
    """
    # FIXME(Due to a potential bug in declarative framework, for Arrays, that
    # are values to some dict items (Dict(properties={"foo": Array()})),
    # prop.get_value(artifact) returns not the real list of items, but the
    # whole dict). So we can't rely on prop.get_value(artifact) and will pass
    # correctly retrieved values to this function
    serialized_value = []
    for idx, item in enumerate(values or []):
        db_type = prop.get_item_definition_at_index(idx).DB_TYPE
        if db_type is None:
            # items without a DB representation are silently skipped
            continue
        serialized_value.append({'type': db_type, 'value': item})
    return serialized_value


def _serialize_dict_prop(artifact, prop, key, value, save_prop_func):
    """Serialize one key of a Dict property via `save_prop_func`."""
    key_to_save = prop.name + '.' + key
    dict_key_prop = prop.get_prop_definition_at_key(key)
    db_type = dict_key_prop.DB_TYPE
    is_list = isinstance(dict_key_prop, declarative.ListAttributeDefinition)
    if db_type is None and not is_list:
        # nothing to do here, don't know how to deal with this type
        return
    if is_list:
        serialized = _serialize_list_prop(
            dict_key_prop,
            # FIXME(see comment for _serialize_list_prop func)
            values=(dict_key_prop.get_value(artifact) or {}).get(key, []))
        save_prop_func(key_to_save, 'array', serialized)
    else:
        save_prop_func(key_to_save, db_type, value)


def _serialize_dependencies(artifact):
    """Returns a dict of serialized dependencies for given artifact"""
    dependencies = {}
    for relation in artifact.metadata.attributes.dependencies.values():
        serialized_dependency = []
        if isinstance(relation, declarative.ListAttributeDefinition):
            for dep in relation.get_value(artifact):
                serialized_dependency.append(dep.id)
        else:
            relation_data = relation.get_value(artifact)
            if relation_data:
                serialized_dependency.append(relation.get_value(artifact).id)
        dependencies[relation.name] = serialized_dependency
    return dependencies


def _serialize_blobs(artifact):
    """Return a dict of serialized blobs for given artifact"""
    def _blob_as_dict(b):
        return {
            'size': b.size,
            'locations': b.locations,
            'checksum': b.checksum,
            'item_key': b.item_key
        }

    blobs = {}
    for blob in artifact.metadata.attributes.blobs.values():
        serialized_blob = []
        if isinstance(blob, declarative.ListAttributeDefinition):
            for b in blob.get_value(artifact) or []:
                serialized_blob.append(_blob_as_dict(b))
        else:
            b = blob.get_value(artifact)
            # if no value for blob has been set -> continue
            if not b:
                continue
            serialized_blob.append(_blob_as_dict(b))
        blobs[blob.name] = serialized_blob
    return blobs


def serialize_for_db(artifact):
    """Convert an artifact object into the flat dict form stored in the DB."""
    result = {}
    custom_properties = {}

    def _save_prop(prop_key, prop_type, value):
        custom_properties[prop_key] = {
            'type': prop_type,
            'value': value
        }

    for prop in artifact.metadata.attributes.properties.values():
        if prop.name in COMMON_ARTIFACT_PROPERTIES:
            result[prop.name] = prop.get_value(artifact)
            continue
        if isinstance(prop, declarative.ListAttributeDefinition):
            serialized_value = _serialize_list_prop(prop,
                                                    prop.get_value(artifact))
            _save_prop(prop.name, 'array', serialized_value)
        elif isinstance(prop, declarative.DictAttributeDefinition):
            fields_to_set = prop.get_value(artifact) or {}
            # if some keys are not present (like in prop == {}), then have to
            # set their values to None.
            # XXX FIXME prop.properties may be a dict ({'foo': '', 'bar': ''})
            # or String\Integer\whatsoever, limiting the possible dict values.
            # In the latter case have no idea how to remove old values during
            # serialization process.
            if isinstance(prop.properties, dict):
                missing = [k for k in prop.properties
                           if k not in fields_to_set.keys()]
                for key in missing:
                    _serialize_dict_prop(artifact, prop, key, None,
                                         _save_prop)
            # serialize values of properties present
            for key, value in six.iteritems(fields_to_set):
                _serialize_dict_prop(artifact, prop, key, value, _save_prop)
        elif prop.DB_TYPE is not None:
            _save_prop(prop.name, prop.DB_TYPE, prop.get_value(artifact))

    result['properties'] = custom_properties
    result['dependencies'] = _serialize_dependencies(artifact)
    result['blobs'] = _serialize_blobs(artifact)
    return result


def _deserialize_blobs(artifact_type, blobs_from_db, artifact_properties):
    """Retrieves blobs from database"""
    for blob_name, blob_value in six.iteritems(blobs_from_db):
        if not blob_value:
            continue
        blob_def = artifact_type.metadata.attributes.blobs.get(blob_name)
        if isinstance(blob_def, declarative.ListAttributeDefinition):
            val = [definitions.Blob(size=v['size'],
                                    locations=v['locations'],
                                    checksum=v['checksum'],
                                    item_key=v['item_key'])
                   for v in blob_value]
        elif len(blob_value) == 1:
            val = definitions.Blob(size=blob_value[0]['size'],
                                   locations=blob_value[0]['locations'],
                                   checksum=blob_value[0]['checksum'],
                                   item_key=blob_value[0]['item_key'])
        else:
            # a scalar blob attribute must not carry more than one record
            raise exception.InvalidArtifactPropertyValue(
                message=_('Blob %(name)s may not have multiple values'),
                name=blob_name)
        artifact_properties[blob_name] = val


def _deserialize_dependencies(artifact_type, deps_from_db,
                              artifact_properties, plugins):
    """Retrieves dependencies from database"""
    for dep_name, dep_value in six.iteritems(deps_from_db):
        if not dep_value:
            continue
        dep_def = artifact_type.metadata.attributes.dependencies.get(dep_name)
        if isinstance(dep_def, declarative.ListAttributeDefinition):
            val = [deserialize_from_db(v, plugins) for v in dep_value]
        elif len(dep_value) == 1:
            val = deserialize_from_db(dep_value[0], plugins)
        else:
            # a scalar relation must not carry more than one record
            raise exception.InvalidArtifactPropertyValue(
                message=_('Relation %(name)s may not have multiple values'),
                name=dep_name)
        artifact_properties[dep_name] = val


def deserialize_from_db(db_dict, plugins):
    """Rebuild an artifact instance from its DB dict representation."""
    artifact_properties = {}
    type_name = None
    type_version = None

    for prop_name in COMMON_ARTIFACT_PROPERTIES:
        prop_value = db_dict.pop(prop_name, None)
        if prop_name == 'type_name':
            type_name = prop_value
        elif prop_name == 'type_version':
            type_version = prop_value
        else:
            artifact_properties[prop_name] = prop_value

    try:
        artifact_type = plugins.get_class_by_typename(type_name, type_version)
    except exception.ArtifactPluginNotFound:
        raise exception.UnknownArtifactType(name=type_name,
                                            version=type_version)

    type_specific_properties = db_dict.pop('properties', {})
    for prop_name, prop_value in six.iteritems(type_specific_properties):
        prop_type = prop_value.get('type')
        prop_value = prop_value.get('value')
        if prop_value is None:
            continue
        if '.' in prop_name:  # dict-based property
            name, key = prop_name.split('.', 1)
            artifact_properties.setdefault(name, {})
            if prop_type == 'array':
                artifact_properties[name][key] = [item.get('value')
                                                  for item in prop_value]
            else:
                artifact_properties[name][key] = prop_value
        elif prop_type == 'array':  # list-based property
            artifact_properties[prop_name] = [item.get('value')
                                              for item in prop_value]
        else:
            artifact_properties[prop_name] = prop_value

    blobs = db_dict.pop('blobs', {})
    _deserialize_blobs(artifact_type, blobs, artifact_properties)

    dependencies = db_dict.pop('dependencies', {})
    _deserialize_dependencies(artifact_type, dependencies,
                              artifact_properties, plugins)

    return artifact_type(**artifact_properties)


def _process_blobs_for_client(artifact, result):
    """Processes artifact's blobs: adds download links and pretty-printed data.

    The result is stored in 'result' dict.
    """
    def build_uri(blob_attr, position=None):
        """A helper func to build download uri"""
        template = "/artifacts/%(type)s/v%(version)s/%(id)s/%(prop)s/download"
        format_dict = {
            "type": artifact.metadata.endpoint,
            "version": artifact.type_version,
            "id": artifact.id,
            "prop": blob_attr.name
        }
        if position is not None:
            template = ("/artifacts/%(type)s/v%(version)s/"
                        "%(id)s/%(prop)s/%(position)s/download")
            format_dict["position"] = position
        return template % format_dict

    for blob_attr in artifact.metadata.attributes.blobs.values():
        value = blob_attr.get_value(artifact)
        if value is None:
            result[blob_attr.name] = None
        elif isinstance(value, collections.Iterable):
            res_list = []
            for pos, blob in enumerate(value):
                blob_dict = blob.to_dict()
                blob_dict["download_link"] = build_uri(blob_attr, pos)
                res_list.append(blob_dict)
            result[blob_attr.name] = res_list
        else:
            result[blob_attr.name] = value.to_dict()
            result[blob_attr.name]["download_link"] = build_uri(blob_attr)


def serialize_for_client(artifact, show_level=ga.Showlevel.NONE):
    # use serialize_for_db and modify some fields
    # (like properties, show only value, not type)
    result = {}

    for prop in artifact.metadata.attributes.properties.values():
        result[prop.name] = prop.get_value(artifact)

    if show_level > ga.Showlevel.NONE:
        for dep in artifact.metadata.attributes.dependencies.values():
            # DIRECT show level only expands one layer of dependencies
            inner_show_level = (ga.Showlevel.DIRECT
                                if show_level == ga.Showlevel.DIRECT
                                else ga.Showlevel.NONE)
            value = dep.get_value(artifact)
            if value is None:
                result[dep.name] = None
            elif isinstance(value, list):
                result[dep.name] = [serialize_for_client(v, inner_show_level)
                                    for v in value]
            else:
                result[dep.name] = serialize_for_client(value,
                                                        inner_show_level)

    _process_blobs_for_client(artifact, result)
    return result

# ---- next archive member: glance/common/auth.py ----
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# (module docstring, continued on the next chunk:)
# This auth module is intended to allow OpenStack client-tools to select from
# a variety of authentication strategies, including NoAuth (the default), and
# Keystone (an identity management system).
# NOTE(review): continuation of glance/common/auth.py; the module docstring
# opens on the previous chunk.  Usage example from that docstring:
#
#     > auth_plugin = AuthPlugin(creds)
#     > auth_plugin.authenticate()
#     > auth_plugin.auth_token
#     abcdefg
#     > auth_plugin.management_url
#     http://service_endpoint/

import httplib2
from keystoneclient import service_catalog as ks_service_catalog
from oslo_log import log as logging
from oslo_serialization import jsonutils
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import six.moves.urllib.parse as urlparse

from glance.common import exception
from glance.i18n import _

LOG = logging.getLogger(__name__)


class BaseStrategy(object):
    """Common interface for authentication strategies."""

    def __init__(self):
        self.auth_token = None
        # TODO(sirp): Should expose selecting public/internal/admin URL.
        self.management_url = None

    def authenticate(self):
        raise NotImplementedError

    @property
    def is_authenticated(self):
        raise NotImplementedError

    @property
    def strategy(self):
        raise NotImplementedError


class NoAuthStrategy(BaseStrategy):
    """No-op strategy: always authenticated, no token, no endpoint."""

    def authenticate(self):
        pass

    @property
    def is_authenticated(self):
        return True

    @property
    def strategy(self):
        return 'noauth'


class KeystoneStrategy(BaseStrategy):
    """Authenticate against Keystone (v1 headers or v2.0 JSON body)."""

    MAX_REDIRECTS = 10

    def __init__(self, creds, insecure=False, configure_via_auth=True):
        self.creds = creds
        self.insecure = insecure
        self.configure_via_auth = configure_via_auth
        super(KeystoneStrategy, self).__init__()

    def check_auth_params(self):
        # Ensure that supplied credential parameters are as required
        for required in ('username', 'password', 'auth_url', 'strategy'):
            if self.creds.get(required) is None:
                raise exception.MissingCredentialError(required=required)
        if self.creds['strategy'] != 'keystone':
            raise exception.BadAuthStrategy(expected='keystone',
                                            received=self.creds['strategy'])
        # For v2.0 also check tenant is present
        if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
            if self.creds.get("tenant") is None:
                raise exception.MissingCredentialError(required='tenant')

    def authenticate(self):
        """Authenticate with the Keystone service.

        There are a few scenarios to consider here:

        1. Which version of Keystone are we using? v1 which uses headers to
           pass the credentials, or v2 which uses a JSON encoded request
           body?

        2. Keystone may respond back with a redirection using a 305 status
           code.

        3. We may attempt a v1 auth when v2 is what's called for. In this
           case, we rewrite the url to contain /v2.0/ and retry using the v2
           protocol.
        """
        def _authenticate(auth_url):
            # If OS_AUTH_URL is missing a trailing slash add one
            if not auth_url.endswith('/'):
                auth_url += '/'
            token_url = urlparse.urljoin(auth_url, "tokens")
            # 1. Check Keystone version
            is_v2 = auth_url.rstrip('/').endswith('v2.0')
            if is_v2:
                self._v2_auth(token_url)
            else:
                self._v1_auth(token_url)

        self.check_auth_params()
        auth_url = self.creds['auth_url']
        for redirect_iter in range(self.MAX_REDIRECTS):
            try:
                _authenticate(auth_url)
            except exception.AuthorizationRedirect as e:
                # 2. Keystone may redirect us
                auth_url = e.url
            except exception.AuthorizationFailure:
                # 3. In some configurations nova makes redirection to
                # v2.0 keystone endpoint. Also, new location does not
                # contain real endpoint, only hostname and port.
                if 'v2.0' not in auth_url:
                    auth_url = urlparse.urljoin(auth_url, 'v2.0/')
            else:
                # If we successfully auth'd, then memorize the correct
                # auth_url for future use.
                self.creds['auth_url'] = auth_url
                break
        else:
            # Guard against a redirection loop
            raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)

    def _v1_auth(self, token_url):
        creds = self.creds

        headers = {
            'X-Auth-User': creds['username'],
            'X-Auth-Key': creds['password']
        }

        tenant = creds.get('tenant')
        if tenant:
            headers['X-Auth-Tenant'] = tenant

        resp, resp_body = self._do_request(token_url, 'GET', headers=headers)

        def _management_url(self, resp):
            # scan the known management-url headers; re-raise the last
            # KeyError when none of them is present
            for url_header in ('x-image-management-url',
                               'x-server-management-url',
                               'x-glance'):
                try:
                    return resp[url_header]
                except KeyError as e:
                    not_found = e
            raise not_found

        if resp.status in (200, 204):
            try:
                if self.configure_via_auth:
                    self.management_url = _management_url(self, resp)
                self.auth_token = resp['x-auth-token']
            except KeyError:
                raise exception.AuthorizationFailure()
        elif resp.status == 305:
            raise exception.AuthorizationRedirect(uri=resp['location'])
        elif resp.status == 400:
            raise exception.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exception.NotAuthenticated()
        elif resp.status == 404:
            raise exception.AuthUrlNotFound(url=token_url)
        else:
            raise Exception(_('Unexpected response: %s') % resp.status)

    def _v2_auth(self, token_url):
        creds = self.creds

        creds = {
            "auth": {
                "tenantName": creds['tenant'],
                "passwordCredentials": {
                    "username": creds['username'],
                    "password": creds['password']
                }
            }
        }

        headers = {'Content-Type': 'application/json'}
        req_body = jsonutils.dumps(creds)

        resp, resp_body = self._do_request(
            token_url, 'POST', headers=headers, body=req_body)

        if resp.status == 200:
            resp_auth = jsonutils.loads(resp_body)['access']
            creds_region = self.creds.get('region')
            if self.configure_via_auth:
                endpoint = get_endpoint(resp_auth['serviceCatalog'],
                                        endpoint_region=creds_region)
                self.management_url = endpoint
            self.auth_token = resp_auth['token']['id']
        elif resp.status == 305:
            raise exception.RedirectException(resp['location'])
        elif resp.status == 400:
            raise exception.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exception.NotAuthenticated()
        elif resp.status == 404:
            raise exception.AuthUrlNotFound(url=token_url)
        else:
            raise Exception(_('Unexpected response: %s') % resp.status)

    @property
    def is_authenticated(self):
        return self.auth_token is not None

    @property
    def strategy(self):
        return 'keystone'

    def _do_request(self, url, method, headers=None, body=None):
        headers = headers or {}
        conn = httplib2.Http()
        conn.force_exception_to_status_code = True
        conn.disable_ssl_certificate_validation = self.insecure
        headers['User-Agent'] = 'glance-client'
        resp, resp_body = conn.request(url, method, headers=headers,
                                       body=body)
        return resp, resp_body


def get_plugin_from_strategy(strategy, creds=None, insecure=False,
                             configure_via_auth=True):
    """Factory for the auth strategy named by `strategy`."""
    if strategy == 'noauth':
        return NoAuthStrategy()
    elif strategy == 'keystone':
        return KeystoneStrategy(creds, insecure,
                                configure_via_auth=configure_via_auth)
    else:
        raise Exception(_("Unknown auth strategy '%s'") % strategy)


def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
                 endpoint_type='publicURL'):
    """
    Select an endpoint from the service catalog

    We search the full service catalog for services
    matching both type and region. If the client
    supplied no region then any 'image' endpoint
    is considered a match. There must be one -- and
    only one -- successful match in the catalog,
    otherwise we will raise an exception.
    """
    endpoints = ks_service_catalog.ServiceCatalogV2(
        {'serviceCatalog': service_catalog}
    ).get_urls(service_type=service_type,
               region_name=endpoint_region,
               endpoint_type=endpoint_type)
    if endpoints is None:
        raise exception.NoServiceEndpoint()
    elif len(endpoints) == 1:
        return endpoints[0]
    else:
        raise exception.RegionAmbiguity(region=endpoint_region)

# ---- next archive member: glance/common/utils.py ----
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 SoftLayer Technologies, Inc.
# Copyright 2015 Mirantis, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# (module docstring, continued on the next chunk:)
# System-level utilities and helper functions.
""" import errno try: from eventlet import sleep except ImportError: from time import sleep from eventlet.green import socket import functools import os import re import uuid from OpenSSL import crypto from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import strutils import six from webob import exc from glance.common import exception from glance.i18n import _, _LE CONF = cfg.CONF LOG = logging.getLogger(__name__) # Whitelist of v1 API headers of form x-image-meta-xxx IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', 'x-image-meta-is_public', 'x-image-meta-disk_format', 'x-image-meta-container_format', 'x-image-meta-name', 'x-image-meta-status', 'x-image-meta-copy_from', 'x-image-meta-uri', 'x-image-meta-checksum', 'x-image-meta-created_at', 'x-image-meta-updated_at', 'x-image-meta-deleted_at', 'x-image-meta-min_ram', 'x-image-meta-min_disk', 'x-image-meta-owner', 'x-image-meta-store', 'x-image-meta-id', 'x-image-meta-protected', 'x-image-meta-deleted', 'x-image-meta-virtual_size'] GLANCE_TEST_SOCKET_FD_STR = 'GLANCE_TEST_SOCKET_FD' def chunkreadable(iter, chunk_size=65536): """ Wrap a readable iterator with a reader yielding chunks of a preferred size, otherwise leave iterator unchanged. :param iter: an iter which may also be readable :param chunk_size: maximum size of chunk """ return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter def chunkiter(fp, chunk_size=65536): """ Return an iterator to a file-like obj which yields fixed size chunks :param fp: a file-like object :param chunk_size: maximum size of chunk """ while True: chunk = fp.read(chunk_size) if chunk: yield chunk else: break def cooperative_iter(iter): """ Return an iterator which schedules after each iteration. This can prevent eventlet thread starvation. 
:param iter: an iterator to wrap """ try: for chunk in iter: sleep(0) yield chunk except Exception as err: with excutils.save_and_reraise_exception(): msg = _LE("Error: cooperative_iter exception %s") % err LOG.error(msg) def cooperative_read(fd): """ Wrap a file descriptor's read with a partial function which schedules after each read. This can prevent eventlet thread starvation. :param fd: a file descriptor to wrap """ def readfn(*args): result = fd.read(*args) sleep(0) return result return readfn MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit class CooperativeReader(object): """ An eventlet thread friendly class for reading in image data. When accessing data either through the iterator or the read method we perform a sleep to allow a co-operative yield. When there is more than one image being uploaded/downloaded this prevents eventlet thread starvation, ie allows all threads to be scheduled periodically rather than having the same thread be continuously active. """ def __init__(self, fd): """ :param fd: Underlying image file object """ self.fd = fd self.iterator = None # NOTE(markwash): if the underlying supports read(), overwrite the # default iterator-based implementation with cooperative_read which # is more straightforward if hasattr(fd, 'read'): self.read = cooperative_read(fd) else: self.iterator = None self.buffer = b'' self.position = 0 def read(self, length=None): """Return the requested amount of bytes, fetching the next chunk of the underlying iterator when needed. This is replaced with cooperative_read in __init__ if the underlying fd already supports read(). """ if length is None: if len(self.buffer) - self.position > 0: # if no length specified but some data exists in buffer, # return that data and clear the buffer result = self.buffer[self.position:] self.buffer = b'' self.position = 0 return str(result) else: # otherwise read the next chunk from the underlying iterator # and return it as a whole. 
Reset the buffer, as subsequent # calls may specify the length try: if self.iterator is None: self.iterator = self.__iter__() return next(self.iterator) except StopIteration: return '' finally: self.buffer = b'' self.position = 0 else: result = bytearray() while len(result) < length: if self.position < len(self.buffer): to_read = length - len(result) chunk = self.buffer[self.position:self.position + to_read] result.extend(chunk) # This check is here to prevent potential OOM issues if # this code is called with unreasonably high values of read # size. Currently it is only called from the HTTP clients # of Glance backend stores, which use httplib for data # streaming, which has readsize hardcoded to 8K, so this # check should never fire. Regardless it still worths to # make the check, as the code may be reused somewhere else. if len(result) >= MAX_COOP_READER_BUFFER_SIZE: raise exception.LimitExceeded() self.position += len(chunk) else: try: if self.iterator is None: self.iterator = self.__iter__() self.buffer = next(self.iterator) self.position = 0 except StopIteration: self.buffer = b'' self.position = 0 return bytes(result) return bytes(result) def __iter__(self): return cooperative_iter(self.fd.__iter__()) class LimitingReader(object): """ Reader designed to fail when reading image data past the configured allowable amount. 
""" def __init__(self, data, limit): """ :param data: Underlying image data object :param limit: maximum number of bytes the reader should allow """ self.data = data self.limit = limit self.bytes_read = 0 def __iter__(self): for chunk in self.data: self.bytes_read += len(chunk) if self.bytes_read > self.limit: raise exception.ImageSizeLimitExceeded() else: yield chunk def read(self, i): result = self.data.read(i) self.bytes_read += len(result) if self.bytes_read > self.limit: raise exception.ImageSizeLimitExceeded() return result def image_meta_to_http_headers(image_meta): """ Returns a set of image metadata into a dict of HTTP headers that can be fed to either a Webob Request object or an httplib.HTTP(S)Connection object :param image_meta: Mapping of image metadata """ headers = {} for k, v in image_meta.items(): if v is not None: if k == 'properties': for pk, pv in v.items(): if pv is not None: headers["x-image-meta-property-%s" % pk.lower()] = six.text_type(pv) else: headers["x-image-meta-%s" % k.lower()] = six.text_type(v) return headers def get_image_meta_from_headers(response): """ Processes HTTP headers from a supplied response that match the x-image-meta and x-image-meta-property and returns a mapping of image metadata and properties :param response: Response to process """ result = {} properties = {} if hasattr(response, 'getheaders'): # httplib.HTTPResponse headers = response.getheaders() else: # webob.Response headers = response.headers.items() for key, value in headers: key = str(key.lower()) if key.startswith('x-image-meta-property-'): field_name = key[len('x-image-meta-property-'):].replace('-', '_') properties[field_name] = value or None elif key.startswith('x-image-meta-'): field_name = key[len('x-image-meta-'):].replace('-', '_') if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: msg = _("Bad header: %(header_name)s") % {'header_name': key} raise exc.HTTPBadRequest(msg, content_type="text/plain") result[field_name] = value or None 
result['properties'] = properties for key, nullable in [('size', False), ('min_disk', False), ('min_ram', False), ('virtual_size', True)]: if key in result: try: result[key] = int(result[key]) except ValueError: if nullable and result[key] == str(None): result[key] = None else: extra = (_("Cannot convert image %(key)s '%(value)s' " "to an integer.") % {'key': key, 'value': result[key]}) raise exception.InvalidParameterValue(value=result[key], param=key, extra_msg=extra) if result[key] is not None and result[key] < 0: extra = _('Cannot be a negative value.') raise exception.InvalidParameterValue(value=result[key], param=key, extra_msg=extra) for key in ('is_public', 'deleted', 'protected'): if key in result: result[key] = strutils.bool_from_string(result[key]) return result def create_mashup_dict(image_meta): """ Returns a dictionary-like mashup of the image core properties and the image custom properties from given image metadata. :param image_meta: metadata of image with core and custom properties """ d = {} for key, value in six.iteritems(image_meta): if isinstance(value, dict): for subkey, subvalue in six.iteritems( create_mashup_dict(value)): if subkey not in image_meta: d[subkey] = subvalue else: d[key] = value return d def safe_mkdirs(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def mutating(func): """Decorator to enforce read-only logic""" @functools.wraps(func) def wrapped(self, req, *args, **kwargs): if req.context.read_only: msg = "Read-only access" LOG.debug(msg) raise exc.HTTPForbidden(msg, request=req, content_type="text/plain") return func(self, req, *args, **kwargs) return wrapped def setup_remote_pydev_debug(host, port): error_msg = _LE('Error setting up the debug environment. 
Verify that the' ' option pydev_worker_debug_host is pointing to a valid ' 'hostname or IP on which a pydev server is listening on' ' the port indicated by pydev_worker_debug_port.') try: try: from pydev import pydevd except ImportError: import pydevd pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True) return True except Exception: with excutils.save_and_reraise_exception(): LOG.exception(error_msg) def validate_key_cert(key_file, cert_file): try: error_key_name = "private key" error_filename = key_file with open(key_file, 'r') as keyfile: key_str = keyfile.read() key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) error_key_name = "certificate" error_filename = cert_file with open(cert_file, 'r') as certfile: cert_str = certfile.read() cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) except IOError as ioe: raise RuntimeError(_("There is a problem with your %(error_key_name)s " "%(error_filename)s. Please verify it." " Error: %(ioe)s") % {'error_key_name': error_key_name, 'error_filename': error_filename, 'ioe': ioe}) except crypto.Error as ce: raise RuntimeError(_("There is a problem with your %(error_key_name)s " "%(error_filename)s. Please verify it. OpenSSL" " error: %(ce)s") % {'error_key_name': error_key_name, 'error_filename': error_filename, 'ce': ce}) try: data = str(uuid.uuid4()) # On Python 3, explicitly encode to UTF-8 to call crypto.sign() which # requires bytes. Otherwise, it raises a deprecation warning (and # will raise an error later). data = encodeutils.to_utf8(data) digest = CONF.digest_algorithm if digest == 'sha1': LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)' ' state that the SHA-1 is not suitable for' ' general-purpose digital signature applications (as' ' specified in FIPS 186-3) that require 112 bits of' ' security. 
The default value is sha1 in Kilo for a' ' smooth upgrade process, and it will be updated' ' with sha256 in next release(L).') out = crypto.sign(key, data, digest) crypto.verify(cert, out, data, digest) except crypto.Error as ce: raise RuntimeError(_("There is a problem with your key pair. " "Please verify that cert %(cert_file)s and " "key %(key_file)s belong together. OpenSSL " "error %(ce)s") % {'cert_file': cert_file, 'key_file': key_file, 'ce': ce}) def get_test_suite_socket(): global GLANCE_TEST_SOCKET_FD_STR if GLANCE_TEST_SOCKET_FD_STR in os.environ: fd = int(os.environ[GLANCE_TEST_SOCKET_FD_STR]) sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) if six.PY2: sock = socket.SocketType(_sock=sock) sock.listen(CONF.backlog) del os.environ[GLANCE_TEST_SOCKET_FD_STR] os.close(fd) return sock return None def is_valid_hostname(hostname): """Verify whether a hostname (not an FQDN) is valid.""" return re.match('^[a-zA-Z0-9-]+$', hostname) is not None def is_valid_fqdn(fqdn): """Verify whether a host is a valid FQDN.""" return re.match('^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None def parse_valid_host_port(host_port): """ Given a "host:port" string, attempts to parse it as intelligently as possible to determine if it is valid. This includes IPv6 [host]:port form, IPv4 ip:port form, and hostname:port or fqdn:port form. Invalid inputs will raise a ValueError, while valid inputs will return a (host, port) tuple where the port will always be of type int. """ try: try: host, port = netutils.parse_host_port(host_port) except Exception: raise ValueError(_('Host and port "%s" is not valid.') % host_port) if not netutils.is_valid_port(port): raise ValueError(_('Port "%s" is not valid.') % port) # First check for valid IPv6 and IPv4 addresses, then a generic # hostname. Failing those, if the host includes a period, then this # should pass a very generic FQDN check. The FQDN check for letters at # the tail end will weed out any hilariously absurd IPv4 addresses. 
if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or is_valid_hostname(host) or is_valid_fqdn(host)): raise ValueError(_('Host "%s" is not valid.') % host) except Exception as ex: raise ValueError(_('%s ' 'Please specify a host:port pair, where host is an ' 'IPv4 address, IPv6 address, hostname, or FQDN. If ' 'using an IPv6 address, enclose it in brackets ' 'separately from the port (i.e., ' '"[fe80::a:b:c]:9876").') % ex) return (host, int(port)) try: REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]') except re.error: # UCS-2 build case REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') def no_4byte_params(f): """ Checks that no 4 byte unicode characters are allowed in dicts' keys/values and string's parameters """ def wrapper(*args, **kwargs): def _is_match(some_str): return (isinstance(some_str, six.text_type) and REGEX_4BYTE_UNICODE.findall(some_str) != []) def _check_dict(data_dict): # a dict of dicts has to be checked recursively for key, value in six.iteritems(data_dict): if isinstance(value, dict): _check_dict(value) else: if _is_match(key): msg = _("Property names can't contain 4 byte unicode.") raise exception.Invalid(msg) if _is_match(value): msg = (_("%s can't contain 4 byte unicode characters.") % key.title()) raise exception.Invalid(msg) for data_dict in [arg for arg in args if isinstance(arg, dict)]: _check_dict(data_dict) # now check args for str values for arg in args: if _is_match(arg): msg = _("Param values can't contain 4 byte unicode.") raise exception.Invalid(msg) # check kwargs as well, as params are passed as kwargs via # registry calls _check_dict(kwargs) return f(*args, **kwargs) return wrapper def validate_mysql_int(*args, **kwargs): """ Make sure that all arguments are less than 2 ** 31 - 1. This limitation is introduced because mysql stores INT in 4 bytes. If the validation fails for some argument, exception.Invalid is raised with appropriate information. 
""" max_int = (2 ** 31) - 1 for param in args: if param > max_int: msg = _("Value %(value)d out of range, " "must not exceed %(max)d") % {"value": param, "max": max_int} raise exception.Invalid(msg) for param_str in kwargs: param = kwargs.get(param_str) if param and param > max_int: msg = _("'%(param)s' value out of range, " "must not exceed %(max)d") % {"param": param_str, "max": max_int} raise exception.Invalid(msg) def stash_conf_values(): """ Make a copy of some of the current global CONF's settings. Allows determining if any of these values have changed when the config is reloaded. """ conf = { 'bind_host': CONF.bind_host, 'bind_port': CONF.bind_port, 'tcp_keepidle': CONF.cert_file, 'backlog': CONF.backlog, 'key_file': CONF.key_file, 'cert_file': CONF.cert_file } return conf def split_filter_op(expression): """Split operator from threshold in an expression. Designed for use on a comparative-filtering query field. When no operator is found, default to an equality comparison. :param expression: the expression to parse :returns: a tuple (operator, threshold) parsed from expression """ left, sep, right = expression.partition(':') if sep: op = left threshold = right else: op = 'eq' # default operator threshold = left # NOTE stevelle decoding escaped values may be needed later return op, threshold def validate_quotes(value): """Validate filter values Validation opening/closing quotes in the expression. """ open_quotes = True for i in range(len(value)): if value[i] == '"': if i and value[i - 1] == '\\': continue if open_quotes: if i and value[i - 1] != ',': msg = _("Invalid filter value %s. There is no comma " "before opening quotation mark.") % value raise exception.InvalidParameterValue(message=msg) else: if i + 1 != len(value) and value[i + 1] != ",": msg = _("Invalid filter value %s. 
There is no comma " "after closing quotation mark.") % value raise exception.InvalidParameterValue(message=msg) open_quotes = not open_quotes if not open_quotes: msg = _("Invalid filter value %s. The quote is not closed.") % value raise exception.InvalidParameterValue(message=msg) def split_filter_value_for_quotes(value): """Split filter values Split values by commas and quotes for 'in' operator, according api-wg. """ validate_quotes(value) tmp = re.compile(r''' "( # if found a double-quote [^\"\\]* # take characters either non-quotes or backslashes (?:\\. # take backslashes and character after it [^\"\\]*)* # take characters either non-quotes or backslashes ) # before double-quote ",? # a double-quote with comma maybe | ([^,]+),? # if not found double-quote take any non-comma # characters with comma maybe | , # if we have only comma take empty string ''', re.VERBOSE) return [val[0] or val[1] for val in re.findall(tmp, value)] def evaluate_filter_op(value, operator, threshold): """Evaluate a comparison operator. Designed for use on a comparative-filtering query field. 
:param value: evaluated against the operator, as left side of expression :param operator: any supported filter operation :param threshold: to compare value against, as right side of expression :raises: InvalidFilterOperatorValue if an unknown operator is provided :returns: boolean result of applied comparison """ if operator == 'gt': return value > threshold elif operator == 'gte': return value >= threshold elif operator == 'lt': return value < threshold elif operator == 'lte': return value <= threshold elif operator == 'neq': return value != threshold elif operator == 'eq': return value == threshold msg = _("Unable to filter on a unknown operator.") raise exception.InvalidFilterOperatorValue(msg) glance-12.0.0/glance/common/__init__.py0000664000567000056710000000000012701407047021006 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/store_utils.py0000664000567000056710000001133012701407047021633 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import glance_store as store_api from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six.moves.urllib.parse as urlparse import glance.db as db_api from glance.i18n import _LE, _LW from glance import scrubber LOG = logging.getLogger(__name__) CONF = cfg.CONF RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config']) def safe_delete_from_backend(context, image_id, location): """ Given a location, delete an image from the store and update location status to db. This function try to handle all known exceptions which might be raised by those calls on store and DB modules in its implementation. :param context: The request context :param image_id: The image identifier :param location: The image location entry """ try: ret = store_api.delete_from_backend(location['url'], context=context) location['status'] = 'deleted' if 'id' in location: db_api.get_api().image_location_delete(context, image_id, location['id'], 'deleted') return ret except store_api.NotFound: msg = _LW('Failed to delete image %s in store from URI') % image_id LOG.warn(msg) except store_api.StoreDeleteNotSupported as e: LOG.warn(encodeutils.exception_to_unicode(e)) except store_api.UnsupportedBackend: exc_type = sys.exc_info()[0].__name__ msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') % dict(image_id=image_id, exc=exc_type)) LOG.error(msg) def schedule_delayed_delete_from_backend(context, image_id, location): """ Given a location, schedule the deletion of an image location and update location status to db. 
:param context: The request context :param image_id: The image identifier :param location: The image location entry """ db_queue = scrubber.get_scrub_queue() if not CONF.use_user_token: context = None ret = db_queue.add_location(image_id, location) if ret: location['status'] = 'pending_delete' if 'id' in location: # NOTE(zhiyan): New added image location entry will has no 'id' # field since it has not been saved to DB. db_api.get_api().image_location_delete(context, image_id, location['id'], 'pending_delete') else: db_api.get_api().image_location_add(context, image_id, location) return ret def delete_image_location_from_backend(context, image_id, location): """ Given a location, immediately or schedule the deletion of an image location and update location status to db. :param context: The request context :param image_id: The image identifier :param location: The image location entry """ deleted = False if CONF.delayed_delete: deleted = schedule_delayed_delete_from_backend(context, image_id, location) if not deleted: # NOTE(zhiyan) If image metadata has not been saved to DB # such as uploading process failure then we can't use # location status mechanism to support image pending delete. safe_delete_from_backend(context, image_id, location) def validate_external_location(uri): """ Validate if URI of external location are supported. Only over non-local store types are OK, i.e. S3, Swift, HTTP. Note the absence of 'file://' for security reasons, see LP bug #942118, 1400966, 'swift+config://' is also absent for security reasons, see LP bug #1334196. :param uri: The URI of external image location. :returns: Whether given URI of external image location are OK. """ if not uri: return False # TODO(zhiyan): This function could be moved to glance_store. 
# TODO(gm): Use a whitelist of allowed schemes scheme = urlparse.urlparse(uri).scheme return (scheme in store_api.get_known_schemes() and scheme not in RESTRICTED_URI_SCHEMAS) glance-12.0.0/glance/common/wsgi.py0000664000567000056710000010531612701407051020233 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods for working with WSGI servers """ from __future__ import print_function import errno import functools import os import signal import sys import time import eventlet from eventlet.green import socket from eventlet.green import ssl import eventlet.greenio import eventlet.wsgi import glance_store from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils import routes import routes.middleware import six import webob.dec import webob.exc from webob import multidict from glance.common import config from glance.common import exception from glance.common import utils from glance import i18n from glance.i18n import _, _LE, _LI, _LW bind_opts = [ cfg.StrOpt('bind_host', default='0.0.0.0', help=_('Address to bind the server. 
Useful when ' 'selecting a particular network interface.')), cfg.PortOpt('bind_port', help=_('The port on which the server will listen.')), ] socket_opts = [ cfg.IntOpt('backlog', default=4096, help=_('The backlog value that will be used when creating the ' 'TCP listener socket.')), cfg.IntOpt('tcp_keepidle', default=600, help=_('The value for the socket option TCP_KEEPIDLE. This is ' 'the time in seconds that the connection must be idle ' 'before TCP starts sending keepalive probes.')), cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify ' 'connecting clients.')), cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API ' 'server securely.')), cfg.StrOpt('key_file', help=_('Private key file to use when starting API ' 'server securely.')), ] eventlet_opts = [ cfg.IntOpt('workers', help=_('The number of child process workers that will be ' 'created to service requests. The default will be ' 'equal to the number of CPUs available.')), cfg.IntOpt('max_header_line', default=16384, help=_('Maximum line size of message headers to be accepted. ' 'max_header_line may need to be increased when using ' 'large tokens (typically those generated by the ' 'Keystone v3 API with big service catalogs')), cfg.BoolOpt('http_keepalive', default=True, help=_('If False, server will return the header ' '"Connection: close", ' 'If True, server will return "Connection: Keep-Alive" ' 'in its responses. In order to close the client socket ' 'connection explicitly after the response is sent and ' 'read successfully by the client, you simply have to ' 'set this option to False when you create a wsgi ' 'server.')), cfg.IntOpt('client_socket_timeout', default=900, help=_('Timeout for client connections\' socket operations. ' 'If an incoming connection is idle for this number of ' 'seconds it will be closed. 
A value of \'0\' means ' 'wait forever.')), ] profiler_opts = [ cfg.BoolOpt("enabled", default=False, help=_('If False fully disable profiling feature.')), cfg.BoolOpt("trace_sqlalchemy", default=False, help=_("If False doesn't trace SQL requests.")), cfg.StrOpt("hmac_keys", default="SECRET_KEY", help=_("Secret key to use to sign Glance API and Glance " "Registry services tracing messages.")), ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(bind_opts) CONF.register_opts(socket_opts) CONF.register_opts(eventlet_opts) CONF.register_opts(profiler_opts, group="profiler") ASYNC_EVENTLET_THREAD_POOL_LIST = [] def get_num_workers(): """Return the configured number of workers.""" if CONF.workers is None: # None implies the number of CPUs return processutils.get_worker_count() return CONF.workers def get_bind_addr(default_port=None): """Return the host and port to bind to.""" return (CONF.bind_host, CONF.bind_port or default_port) def ssl_wrap_socket(sock): """ Wrap an existing socket in SSL :param sock: non-SSL socket to wrap :returns: An SSL wrapped socket """ utils.validate_key_cert(CONF.key_file, CONF.cert_file) ssl_kwargs = { 'server_side': True, 'certfile': CONF.cert_file, 'keyfile': CONF.key_file, 'cert_reqs': ssl.CERT_NONE, } if CONF.ca_file: ssl_kwargs['ca_certs'] = CONF.ca_file ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED return ssl.wrap_socket(sock, **ssl_kwargs) def get_socket(default_port): """ Bind socket to bind ip:port in conf note: Mostly comes from Swift with a few small changes... :param default_port: port to bind to if none is specified in conf :returns: a socket object as returned from socket.listen or ssl.wrap_socket if conf specifies cert_file """ bind_addr = get_bind_addr(default_port) # TODO(jaypipes): eventlet's greened socket module does not actually # support IPv6 in getaddrinfo(). 
We need to get around this in the # future or monitor upstream for a fix address_family = [ addr[0] for addr in socket.getaddrinfo(bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6) ][0] use_ssl = CONF.key_file or CONF.cert_file if use_ssl and (not CONF.key_file or not CONF.cert_file): raise RuntimeError(_("When running server in SSL mode, you must " "specify both a cert_file and key_file " "option value in your configuration file")) sock = utils.get_test_suite_socket() retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: sock = eventlet.listen(bind_addr, backlog=CONF.backlog, family=address_family) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise eventlet.sleep(0.1) if not sock: raise RuntimeError(_("Could not bind to %(host)s:%(port)s after" " trying for 30 seconds") % {'host': bind_addr[0], 'port': bind_addr[1]}) return sock def set_eventlet_hub(): try: eventlet.hubs.use_hub('poll') except Exception: try: eventlet.hubs.use_hub('selects') except Exception: msg = _("eventlet 'poll' nor 'selects' hubs are available " "on this platform") raise exception.WorkerCreationFailure( reason=msg) def initialize_glance_store(): """Initialize glance store.""" glance_store.register_opts(CONF) glance_store.create_stores(CONF) glance_store.verify_default_store() def get_asynchronous_eventlet_pool(size=1000): """Return eventlet pool to caller. Also store pools created in global list, to wait on it after getting signal for graceful shutdown. :param size: eventlet pool size :returns: eventlet pool """ global ASYNC_EVENTLET_THREAD_POOL_LIST pool = eventlet.GreenPool(size=size) # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool) return pool class Server(object): """Server class to manage multiple WSGI sockets and applications. This class requires initialize_glance_store set to True if glance store needs to be initialized. 
""" def __init__(self, threads=1000, initialize_glance_store=False): os.umask(0o27) # ensure files are created with the correct privileges self._logger = logging.getLogger("eventlet.wsgi.server") self.threads = threads self.children = set() self.stale_children = set() self.running = True # NOTE(abhishek): Allows us to only re-initialize glance_store when # the API's configuration reloads. self.initialize_glance_store = initialize_glance_store self.pgid = os.getpid() try: # NOTE(flaper87): Make sure this process # runs in its own process group. os.setpgid(self.pgid, self.pgid) except OSError: # NOTE(flaper87): When running glance-control, # (glance's functional tests, for example) # setpgid fails with EPERM as glance-control # creates a fresh session, of which the newly # launched service becomes the leader (session # leaders may not change process groups) # # Running glance-(api|registry) is safe and # shouldn't raise any error here. self.pgid = 0 def hup(self, *args): """ Reloads configuration files with zero down time """ signal.signal(signal.SIGHUP, signal.SIG_IGN) raise exception.SIGHUPInterrupt def kill_children(self, *args): """Kills the entire process group.""" signal.signal(signal.SIGTERM, signal.SIG_IGN) signal.signal(signal.SIGINT, signal.SIG_IGN) self.running = False os.killpg(self.pgid, signal.SIGTERM) def start(self, application, default_port): """ Run a WSGI server with the given application. :param application: The application to be run in the WSGI server :param default_port: Port to bind to if none is specified in conf """ self.application = application self.default_port = default_port self.configure() self.start_wsgi() def start_wsgi(self): workers = get_num_workers() if workers == 0: # Useful for profiling, test, debug etc. 
self.pool = self.create_pool() self.pool.spawn_n(self._single_run, self.application, self.sock) return else: LOG.info(_LI("Starting %d workers"), workers) signal.signal(signal.SIGTERM, self.kill_children) signal.signal(signal.SIGINT, self.kill_children) signal.signal(signal.SIGHUP, self.hup) while len(self.children) < workers: self.run_child() def create_pool(self): return get_asynchronous_eventlet_pool(size=self.threads) def _remove_children(self, pid): if pid in self.children: self.children.remove(pid) LOG.info(_LI('Removed dead child %s'), pid) elif pid in self.stale_children: self.stale_children.remove(pid) LOG.info(_LI('Removed stale child %s'), pid) else: LOG.warn(_LW('Unrecognised child %s') % pid) def _verify_and_respawn_children(self, pid, status): if len(self.stale_children) == 0: LOG.debug('No stale children') if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: LOG.error(_LE('Not respawning child %d, cannot ' 'recover from termination') % pid) if not self.children and not self.stale_children: LOG.info( _LI('All workers have terminated. Exiting')) self.running = False else: if len(self.children) < get_num_workers(): self.run_child() def wait_on_children(self): while self.running: try: pid, status = os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): self._remove_children(pid) self._verify_and_respawn_children(pid, status) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise except KeyboardInterrupt: LOG.info(_LI('Caught keyboard interrupt. 
Exiting.')) break except exception.SIGHUPInterrupt: self.reload() continue eventlet.greenio.shutdown_safe(self.sock) self.sock.close() LOG.debug('Exited') def configure(self, old_conf=None, has_changed=None): """ Apply configuration settings :param old_conf: Cached old configuration settings (if any) :param has changed: callable to determine if a parameter has changed """ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line self.client_socket_timeout = CONF.client_socket_timeout or None self.configure_socket(old_conf, has_changed) if self.initialize_glance_store: initialize_glance_store() def reload(self): """ Reload and re-apply configuration settings Existing child processes are sent a SIGHUP signal and will exit after completing existing requests. New child processes, which will have the updated configuration, are spawned. This allows preventing interruption to the service. """ def _has_changed(old, new, param): old = old.get(param) new = getattr(new, param) return (new != old) old_conf = utils.stash_conf_values() has_changed = functools.partial(_has_changed, old_conf, CONF) CONF.reload_config_files() os.killpg(self.pgid, signal.SIGHUP) self.stale_children = self.children self.children = set() # Ensure any logging config changes are picked up logging.setup(CONF, 'glance') config.set_config_defaults() self.configure(old_conf, has_changed) self.start_wsgi() def wait(self): """Wait until all servers have completed running.""" try: if self.children: self.wait_on_children() else: self.pool.waitall() except KeyboardInterrupt: pass def run_child(self): def child_hup(*args): """Shuts down child processes, existing requests are handled.""" signal.signal(signal.SIGHUP, signal.SIG_IGN) eventlet.wsgi.is_accepting = False self.sock.close() pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, child_hup) signal.signal(signal.SIGTERM, signal.SIG_DFL) # ignore the interrupt signal to avoid a race whereby # a child worker receives the signal before the parent # and is 
respawned unnecessarily as a result signal.signal(signal.SIGINT, signal.SIG_IGN) # The child has no need to stash the unwrapped # socket, and the reference prevents a clean # exit on sighup self._sock = None self.run_server() LOG.info(_LI('Child %d exiting normally'), os.getpid()) # self.pool.waitall() is now called in wsgi's server so # it's safe to exit here sys.exit(0) else: LOG.info(_LI('Started child %s'), pid) self.children.add(pid) def run_server(self): """Run a WSGI server.""" if cfg.CONF.pydev_worker_debug_host: utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host, cfg.CONF.pydev_worker_debug_port) eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" self.pool = self.create_pool() try: eventlet.wsgi.server(self.sock, self.application, log=self._logger, custom_pool=self.pool, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) except socket.error as err: if err[0] != errno.EINVAL: raise # waiting on async pools if ASYNC_EVENTLET_THREAD_POOL_LIST: for pool in ASYNC_EVENTLET_THREAD_POOL_LIST: pool.waitall() def _single_run(self, application, sock): """Start a WSGI server in a new green thread.""" LOG.info(_LI("Starting single process server")) eventlet.wsgi.server(sock, application, custom_pool=self.pool, log=self._logger, debug=False, keepalive=CONF.http_keepalive, socket_timeout=self.client_socket_timeout) def configure_socket(self, old_conf=None, has_changed=None): """ Ensure a socket exists and is appropriately configured. This function is called on start up, and can also be called in the event of a configuration reload. When called for the first time a new socket is created. If reloading and either bind_host or bind port have been changed the existing socket must be closed and a new socket opened (laws of physics). In all other cases (bind_host/bind_port have not changed) the existing socket is reused. 
:param old_conf: Cached old configuration settings (if any) :param has changed: callable to determine if a parameter has changed """ # Do we need a fresh socket? new_sock = (old_conf is None or ( has_changed('bind_host') or has_changed('bind_port'))) # Will we be using https? use_ssl = not (not CONF.cert_file or not CONF.key_file) # Were we using https before? old_use_ssl = (old_conf is not None and not ( not old_conf.get('key_file') or not old_conf.get('cert_file'))) # Do we now need to perform an SSL wrap on the socket? wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock) # Do we now need to perform an SSL unwrap on the socket? unwrap_sock = use_ssl is False and old_use_ssl is True if new_sock: self._sock = None if old_conf is not None: self.sock.close() _sock = get_socket(self.default_port) _sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # sockets can hang around forever without keepalive _sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self._sock = _sock if wrap_sock: self.sock = ssl_wrap_socket(self._sock) if unwrap_sock: self.sock = self._sock if new_sock and not use_ssl: self.sock = self._sock # Pick up newly deployed certs if old_conf is not None and use_ssl is True and old_use_ssl is True: if has_changed('cert_file') or has_changed('key_file'): utils.validate_key_cert(CONF.key_file, CONF.cert_file) if has_changed('cert_file'): self.sock.certfile = CONF.cert_file if has_changed('key_file'): self.sock.keyfile = CONF.key_file if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): # This option isn't available in the OS X version of eventlet if hasattr(socket, 'TCP_KEEPIDLE'): self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, CONF.tcp_keepidle) if old_conf is not None and has_changed('backlog'): self.sock.listen(CONF.backlog) class Middleware(object): """ Base WSGI middleware wrapper. These classes require an application to be initialized that will be called next. 
By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ def __init__(self, application): self.application = application @classmethod def factory(cls, global_conf, **local_conf): def filter(app): return cls(app) return filter def process_request(self, req): """ Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) response.request = req try: return self.process_response(response) except webob.exc.HTTPException as e: return e class Debug(Middleware): """ Helper class that can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify def __call__(self, req): print(("*" * 40) + " REQUEST ENVIRON") for key, value in req.environ.items(): print(key, "=", value) print('') resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") for (key, value) in six.iteritems(resp.headers): print(key, "=", value) print('') resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """ Iterator that prints the contents of a wrapper string iterator when iterated. """ print(("*" * 40) + " BODY") for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print() class APIMapper(routes.Mapper): """ Handle route matching when url is '' because routes.Mapper returns an error in this case. 
""" def routematch(self, url=None, environ=None): if url is "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) class RejectMethodController(object): def reject(self, req, allowed_methods, *args, **kwargs): LOG.debug("The method %s is not allowed for this resource", req.environ['REQUEST_METHOD']) raise webob.exc.HTTPMethodNotAllowed( headers=[('Allow', allowed_methods)]) class Router(object): """ WSGI middleware that maps incoming requests to WSGI apps. """ def __init__(self, mapper): """ Create a router for the given routes.Mapper. Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as well and have your controller be a wsgi.Controller, who will route the request to the action method. Examples: mapper = routes.Mapper() sc = ServerController() # Explicit mapping of one route to a controller+action mapper.connect(None, "/svrlist", controller=sc, action="list") # Actions are all implicitly defined mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ mapper.redirect("", "/") self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @classmethod def factory(cls, global_conf, **local_conf): return cls(APIMapper()) @webob.dec.wsgify def __call__(self, req): """ Route the incoming request to a controller based on self.map. If no match, return either a 404(Not Found) or 501(Not Implemented). """ return self._router @staticmethod @webob.dec.wsgify def _dispatch(req): """ Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404, 501, or the routed WSGI app's response. 
""" match = req.environ['wsgiorg.routing_args'][1] if not match: implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH'] if req.environ['REQUEST_METHOD'] not in implemented_http_methods: return webob.exc.HTTPNotImplemented() else: return webob.exc.HTTPNotFound() app = match['controller'] return app class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def best_match_content_type(self): """Determine the requested response content-type.""" supported = ('application/json',) bm = self.accept.best_match(supported) return bm or 'application/json' def get_content_type(self, allowed_content_types): """Determine content type of the request body.""" if "Content-Type" not in self.headers: raise exception.InvalidContentType(content_type=None) content_type = self.content_type if content_type not in allowed_content_types: raise exception.InvalidContentType(content_type=content_type) else: return content_type def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None langs = i18n.get_available_languages('glance') return self.accept_language.best_match(langs) def get_content_range(self): """Return the `Range` in a request.""" range_str = self.headers.get('Content-Range') if range_str is not None: range_ = webob.byterange.ContentRange.parse(range_str) if range_ is None: msg = _('Malformed Content-Range header: %s') % range_str raise webob.exc.HTTPBadRequest(explanation=msg) return range_ class JSONRequestDeserializer(object): valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate', 'gzip', 'identity']) def has_body(self, request): """ Returns whether a Webob.Request object will possess an entity body. 
:param request: Webob.Request object
        """
        # A body is considered present when either (a) the request carries
        # a recognized transfer-encoding (e.g. chunked) and webob reports
        # the body as readable, or (b) a positive Content-Length is set.
        request_encoding = request.headers.get('transfer-encoding', '').lower()
        is_valid_encoding = request_encoding in self.valid_transfer_encoding
        if is_valid_encoding and request.is_body_readable:
            return True
        elif request.content_length is not None and request.content_length > 0:
            return True

        return False

    @staticmethod
    def _sanitizer(obj):
        """Sanitizer method that will be passed to jsonutils.loads."""
        # Identity hook: values pass through unchanged.
        return obj

    def from_json(self, datastring):
        """Deserialize a JSON document, accepting only dict/list roots.

        :param datastring: raw request body
        :raises webob.exc.HTTPBadRequest: on malformed JSON or a scalar
            top-level value
        """
        try:
            jsondata = jsonutils.loads(datastring, object_hook=self._sanitizer)
            if not isinstance(jsondata, (dict, list)):
                msg = _('Unexpected body type. Expected list/dict.')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            return jsondata
        except ValueError:
            msg = _('Malformed JSON in request body.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def default(self, request):
        """Return {'body': <parsed JSON>} when a body is present, else {}."""
        if self.has_body(request):
            return {'body': self.from_json(request.body)}
        else:
            return {}


class JSONResponseSerializer(object):

    def _sanitizer(self, obj):
        """Sanitizer method that will be passed to jsonutils.dumps."""
        # Domain objects exposing to_dict() serialize through that hook;
        # webob MultiDicts flatten via mixed(); anything else falls back
        # to oslo's primitive conversion.
        if hasattr(obj, "to_dict"):
            return obj.to_dict()
        if isinstance(obj, multidict.MultiDict):
            return obj.mixed()
        return jsonutils.to_primitive(obj)

    def to_json(self, data):
        """Serialize ``data`` to a JSON byte string."""
        return jsonutils.dump_as_bytes(data, default=self._sanitizer)

    def default(self, response, result):
        """Write ``result`` onto ``response`` as UTF-8 JSON."""
        response.content_type = 'application/json'
        body = self.to_json(result)
        body = encodeutils.to_utf8(body)
        response.body = body


def translate_exception(req, e):
    """Translates all translatable elements of the given exception.

    :param req: the request the exception arose from; must expose
        ``best_match_language`` for translation to occur
    :param e: the exception to translate in place
    :returns: the (possibly translated) exception
    """
    # The RequestClass attribute in the webob.dec.wsgify decorator
    # does not guarantee that the request object will be a particular
    # type; this check is therefore necessary.
    if not hasattr(req, "best_match_language"):
        return e

    locale = req.best_match_language()

    if isinstance(e, webob.exc.HTTPError):
        e.explanation = i18n.translate(e.explanation, locale)
        e.detail = i18n.translate(e.detail, locale)
        if getattr(e, 'body_template', None):
            e.body_template = i18n.translate(e.body_template, locale)
    return e


class Resource(object):
    """
    WSGI app that handles (de)serialization and controller dispatch.

    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """

    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implement methods created by routes
                           lib
        :param deserializer: object that supports webob request
                             deserialization through controller-like actions
        :param serializer: object that supports webob response serialization
                           through controller-like actions
        """
        self.controller = controller
        self.serializer = serializer or JSONResponseSerializer()
        self.deserializer = deserializer or JSONRequestDeserializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        # Routes may flag an action as rejecting request bodies.
        body_reject = strutils.bool_from_string(
            action_args.pop('body_reject', None))

        try:
            if body_reject and self.deserializer.has_body(request):
                msg = _('A body is not expected with this request.')
                raise webob.exc.HTTPBadRequest(explanation=msg)
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)
            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except webob.exc.WSGIHTTPException as e:
            # Re-raise the translated exception with the original traceback.
            exc_info = sys.exc_info()
            e = translate_exception(request, e)
            six.reraise(type(e), e, exc_info[2])
        except UnicodeDecodeError:
            msg = _("Error decoding your request. Either the URL or the "
                    "request body contained characters that could not be "
                    "decoded by Glance")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except Exception as e:
            # Unexpected failure: log it and hide details from the client.
            LOG.exception(_LE("Caught error: %s"),
                          encodeutils.exception_to_unicode(e))
            response = webob.exc.HTTPInternalServerError()
            return response

        try:
            response = webob.Response(request=request)
            self.dispatch(self.serializer, action, response, action_result)
            # encode all headers in response to utf-8 to prevent unicode
            # errors
            for name, value in list(response.headers.items()):
                if six.PY2 and isinstance(value, six.text_type):
                    response.headers[name] = encodeutils.safe_encode(value)
            return response
        except webob.exc.WSGIHTTPException as e:
            return translate_exception(request, e)
        except webob.exc.HTTPException as e:
            return e
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result

    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            # Fall back to the object's catch-all handler.
            method = getattr(obj, 'default')

        return method(*args, **kwargs)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        # Strip routing bookkeeping keys before passing args to controllers.
        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args
glance-12.0.0/glance/common/jsonpatchvalidator.py0000664000567000056710000001207312701407047023163 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A mixin that validates the given body for jsonpatch-compatibility. The methods supported are limited to listed in METHODS_ALLOWED """ import re import jsonschema import glance.common.exception as exc from glance.i18n import _ class JsonPatchValidatorMixin(object): # a list of allowed methods allowed according to RFC 6902 ALLOWED = ["replace", "test", "remove", "add", "copy"] PATH_REGEX_COMPILED = re.compile("^/[^/]+(/[^/]+)*$") def __init__(self, methods_allowed=None): if methods_allowed is None: methods_allowed = ["replace", "remove"] self.schema = self._gen_schema(methods_allowed) self.methods_allowed = [m for m in methods_allowed if m in self.ALLOWED] @staticmethod def _gen_schema(methods_allowed): """ Generates a jsonschema for jsonpatch request based on methods_allowed """ # op replace needs no 'value' param, so needs a special schema if # present in methods_allowed basic_schema = { "type": "array", "items": {"properties": {"op": {"type": "string", "enum": methods_allowed}, "path": {"type": "string"}, "value": {"type": ["string", "object", "integer", "array", "boolean"]} }, "required": ["op", "path", "value"], "type": "object"}, "$schema": "http://json-schema.org/draft-04/schema#" } if "remove" in methods_allowed: methods_allowed.remove("remove") no_remove_op_schema = { "type": "object", "properties": { "op": {"type": "string", "enum": methods_allowed}, "path": {"type": "string"}, "value": {"type": ["string", "object", "integer", "array", "boolean"]} }, "required": ["op", "path", "value"]} op_remove_only_schema = { "type": "object", "properties": { "op": 
{"type": "string", "enum": ["remove"]}, "path": {"type": "string"} }, "required": ["op", "path"]} basic_schema = { "type": "array", "items": { "oneOf": [no_remove_op_schema, op_remove_only_schema]}, "$schema": "http://json-schema.org/draft-04/schema#" } return basic_schema def validate_body(self, body): try: jsonschema.validate(body, self.schema) # now make sure everything is ok with path return [{"path": self._decode_json_pointer(e["path"]), "value": e.get("value", None), "op": e["op"]} for e in body] except jsonschema.ValidationError: raise exc.InvalidJsonPatchBody(body=body, schema=self.schema) def _check_for_path_errors(self, pointer): if not re.match(self.PATH_REGEX_COMPILED, pointer): msg = _("Json path should start with a '/', " "end with no '/', no 2 subsequent '/' are allowed.") raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg) if re.search('~[^01]', pointer) or pointer.endswith('~'): msg = _("Pointer contains '~' which is not part of" " a recognized escape sequence [~0, ~1].") raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg) def _decode_json_pointer(self, pointer): """Parses a json pointer. Returns a pointer as a string. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._check_for_path_errors(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return '/'.join(ret) glance-12.0.0/glance/common/crypt.py0000664000567000056710000000535112701407047020426 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Routines for URL-safe encrypting/decrypting
"""

import base64

from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Random import random
from oslo_utils import encodeutils
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range


def urlsafe_encrypt(key, plaintext, blocksize=16):
    """
    Encrypts plaintext. Resulting ciphertext will contain URL-safe characters.
    If plaintext is Unicode, encode it to UTF-8 before encryption.

    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)

    :returns: Resulting ciphertext
    """
    def pad(text):
        """
        Pads text to be encrypted
        """
        pad_length = (blocksize - len(text) % blocksize)
        sr = random.StrongRandom()
        # Pad bytes are drawn from 1..0xFF, never zero, so the single
        # b'\0' delimiter below is guaranteed to be the LAST zero byte
        # of the padded message; urlsafe_decrypt relies on this when it
        # strips padding with rfind(b'\0').
        pad = b''.join(six.int2byte(sr.randint(1, 0xFF))
                       for i in range(pad_length - 1))
        # We use chr(0) as a delimiter between text and padding
        return text + b'\0' + pad

    plaintext = encodeutils.to_utf8(plaintext)
    key = encodeutils.to_utf8(key)
    # random initial 16 bytes for CBC
    init_vector = Random.get_random_bytes(16)
    cypher = AES.new(key, AES.MODE_CBC, init_vector)
    padded = cypher.encrypt(pad(six.binary_type(plaintext)))
    # The IV is prepended to the ciphertext so decrypt can recover it.
    encoded = base64.urlsafe_b64encode(init_vector + padded)
    if six.PY3:
        encoded = encoded.decode('ascii')
    return encoded


def urlsafe_decrypt(key, ciphertext):
    """
    Decrypts URL-safe base64 encoded ciphertext.
    On Python 3, the result is decoded from UTF-8.

    :param key: AES secret key
    :param ciphertext: The encrypted text to decrypt

    :returns: Resulting plaintext
    """
    # Cast from unicode
    ciphertext = encodeutils.to_utf8(ciphertext)
    key = encodeutils.to_utf8(key)
    ciphertext = base64.urlsafe_b64decode(ciphertext)
    # First 16 bytes are the CBC initialization vector written by
    # urlsafe_encrypt; the remainder is the padded payload.
    cypher = AES.new(key, AES.MODE_CBC, ciphertext[:16])
    padded = cypher.decrypt(ciphertext[16:])
    # Strip the padding: everything after the last NUL delimiter
    # (pad bytes are never zero, see urlsafe_encrypt).
    text = padded[:padded.rfind(b'\0')]
    if six.PY3:
        text = text.decode('utf-8')
    return text
glance-12.0.0/glance/common/config.py0000664000567000056710000003366712701407047020535 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Routines for configuring Glance
"""

import logging
import logging.config
import logging.handlers
import os

from oslo_config import cfg
from oslo_middleware import cors
from oslo_policy import policy
from paste import deploy

from glance.i18n import _
from glance.version import version_info as version

paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. 
For example, if ' 'your paste section name is ' '[pipeline:glance-api-keystone] use the value ' '"keystone"')), cfg.StrOpt('config_file', help=_('Name of the paste configuration file.')), ] image_format_opts = [ cfg.ListOpt('container_formats', default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova', 'docker'], help=_("Supported values for the 'container_format' " "image attribute"), deprecated_opts=[cfg.DeprecatedOpt('container_formats', group='DEFAULT')]), cfg.ListOpt('disk_formats', default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'], help=_("Supported values for the 'disk_format' " "image attribute"), deprecated_opts=[cfg.DeprecatedOpt('disk_formats', group='DEFAULT')]), ] task_opts = [ cfg.IntOpt('task_time_to_live', default=48, help=_("Time in hours for which a task lives after, either " "succeeding or failing"), deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', group='DEFAULT')]), cfg.StrOpt('task_executor', default='taskflow', help=_("Specifies which task executor to be used to run the " "task scripts.")), cfg.StrOpt('work_dir', help=_('Work dir for asynchronous task operations. ' 'The directory set here will be used to operate over ' 'images - normally before they are imported in the ' 'destination store. When providing work dir, make sure ' 'enough space is provided for concurrent tasks to run ' 'efficiently without running out of space. A rough ' 'estimation can be done by multiplying the number of ' '`max_workers` - or the N of workers running - by an ' 'average image size (e.g 500MB). The image size ' 'estimation should be done based on the average size in ' 'your deployment. Note that depending on the tasks ' 'running you may need to multiply this number by some ' 'factor depending on what the task does. For example, ' 'you may want to double the available size if image ' 'conversion is enabled. 
All this being said, remember ' 'these are just estimations and you should do them ' 'based on the worst case scenario and be prepared to ' 'act in case they were wrong.')), ] common_opts = [ cfg.BoolOpt('allow_additional_image_properties', default=True, help=_('Whether to allow users to specify image properties ' 'beyond what the image schema provides')), cfg.IntOpt('image_member_quota', default=128, help=_('Maximum number of image members per image. ' 'Negative values evaluate to unlimited.')), cfg.IntOpt('image_property_quota', default=128, help=_('Maximum number of properties allowed on an image. ' 'Negative values evaluate to unlimited.')), cfg.IntOpt('image_tag_quota', default=128, help=_('Maximum number of tags allowed on an image. ' 'Negative values evaluate to unlimited.')), cfg.IntOpt('image_location_quota', default=10, help=_('Maximum number of locations allowed on an image. ' 'Negative values evaluate to unlimited.')), cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api', help=_('Python module path of data access API')), cfg.IntOpt('limit_param_default', default=25, help=_('Default value for the number of items returned by a ' 'request if not specified explicitly in the request')), cfg.IntOpt('api_limit_max', default=1000, help=_('Maximum permissible number of items that could be ' 'returned by a request')), cfg.BoolOpt('show_image_direct_url', default=False, help=_('Whether to include the backend image storage location ' 'in image properties. Revealing storage location can ' 'be a security risk, so use this setting with ' 'caution!')), cfg.BoolOpt('show_multiple_locations', default=False, help=_('Whether to include the backend image locations ' 'in image properties. ' 'For example, if using the file system store a URL of ' '"file:///path/to/image" will be returned to the user ' 'in the \'direct_url\' meta-data field. ' 'Revealing storage location can ' 'be a security risk, so use this setting with ' 'caution! 
' 'Setting this to true overrides the ' 'show_image_direct_url option.')), cfg.IntOpt('image_size_cap', default=1099511627776, max=9223372036854775808, help=_("Maximum size of image a user can upload in bytes. " "Defaults to 1099511627776 bytes (1 TB)." "WARNING: this value should only be increased after " "careful consideration and must be set to a value under " "8 EB (9223372036854775808).")), cfg.StrOpt('user_storage_quota', default='0', help=_("Set a system wide quota for every user. This value is " "the total capacity that a user can use across " "all storage systems. A value of 0 means unlimited." "Optional unit can be specified for the value. Accepted " "units are B, KB, MB, GB and TB representing " "Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " "respectively. If no unit is specified then Bytes is " "assumed. Note that there should not be any space " "between value and unit and units are case sensitive.")), cfg.BoolOpt('enable_v1_api', default=True, help=_("Deploy the v1 OpenStack Images API.")), cfg.BoolOpt('enable_v2_api', default=True, help=_("Deploy the v2 OpenStack Images API.")), cfg.BoolOpt('enable_v1_registry', default=True, help=_("Deploy the v1 OpenStack Registry API.")), cfg.BoolOpt('enable_v2_registry', default=True, help=_("Deploy the v2 OpenStack Registry API.")), cfg.StrOpt('pydev_worker_debug_host', help=_('The hostname/IP of the pydev process listening for ' 'debug connections')), cfg.PortOpt('pydev_worker_debug_port', default=5678, help=_('The port on which a pydev process is listening for ' 'connections.')), cfg.StrOpt('metadata_encryption_key', secret=True, help=_('AES key for encrypting store \'location\' metadata. ' 'This includes, if used, Swift or S3 credentials. ' 'Should be set to a random string of length 16, 24 or ' '32 bytes')), cfg.StrOpt('digest_algorithm', default='sha256', help=_('Digest algorithm which will be used for digital ' 'signature. 
Use the command "openssl list-message-'
                      'digest-algorithms" to get the available algorithms '
                      'supported by the version of OpenSSL on the platform.'
                      ' Examples are "sha1", "sha256", "sha512", etc.')),
]

CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(image_format_opts, group='image_format')
CONF.register_opts(task_opts, group='task')
CONF.register_opts(common_opts)
# Instantiating the enforcer here registers the oslo.policy options
# on CONF as an import-time side effect.
policy.Enforcer(CONF)


def parse_args(args=None, usage=None, default_config_files=None):
    """Parse CLI/config-file options into the global CONF object."""
    CONF(args=args,
         project='glance',
         version=version.cached_version_string(),
         usage=usage,
         default_config_files=default_config_files)


def parse_cache_args(args=None):
    """Parse options for the glance-cache program."""
    config_files = cfg.find_config_files(project='glance',
                                         prog='glance-cache')
    parse_args(args=args, default_config_files=config_files)


def _get_deployment_flavor(flavor=None):
    """
    Retrieve the paste_deploy.flavor config item, formatted appropriately
    for appending to the application name.

    :param flavor: if specified, use this setting rather than the
                   paste_deploy.flavor configuration setting
    """
    if not flavor:
        flavor = CONF.paste_deploy.flavor
    return '' if not flavor else ('-' + flavor)


def _get_paste_config_path():
    """Locate the paste config file derived from the loaded config files."""
    paste_suffix = '-paste.ini'
    conf_suffix = '.conf'
    if CONF.config_file:
        # Assume paste config is in a paste.ini file corresponding
        # to the last config file
        path = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
    else:
        path = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(path))


def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.

    Falls back to the derived paste config path when the option is unset.
    """
    path = CONF.paste_deploy.config_file
    if not path:
        path = _get_paste_config_path()
    if not path:
        msg = _("Unable to locate paste config file for %s.") % CONF.prog
        raise RuntimeError(msg)
    return os.path.abspath(path)


def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises: RuntimeError when config file cannot be located or application
             cannot be loaded from config file
    """
    # append the deployment flavor to the application name,
    # in order to identify the appropriate paste pipeline
    app_name += _get_deployment_flavor(flavor)

    if not conf_file:
        conf_file = _get_deployment_config_file()

    try:
        logger = logging.getLogger(__name__)
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)


def set_config_defaults():
    """This method updates all configuration default values."""
    set_cors_middleware_defaults()


def set_cors_middleware_defaults():
    """Update default configuration options for oslo.middleware."""
    # CORS Defaults
    # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
    cfg.set_defaults(cors.CORS_OPTS,
                     allow_headers=['Content-MD5',
                                    'X-Image-Meta-Checksum',
                                    'X-Storage-Token',
                                    'Accept-Encoding',
                                    'X-Auth-Token',
                                    'X-Identity-Status',
                                    'X-Roles',
                                    'X-Service-Catalog',
                                    'X-User-Id',
                                    'X-Tenant-Id',
                                    'X-OpenStack-Request-ID'],
                     expose_headers=['X-Image-Meta-Checksum',
                                     'X-Auth-Token',
                                     'X-Subject-Token',
                                     'X-Service-Token',
                                     'X-OpenStack-Request-ID'],
                     allow_methods=['GET',
                                    'PUT',
                                    'POST',
                                    'DELETE',
                                    'PATCH']
                     )
glance-12.0.0/glance/common/swift_store_utils.py0000664000567000056710000000710012701407047023047 0ustar jenkinsjenkins00000000000000# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import configparser

from glance.common import exception
from glance.i18n import _, _LE

swift_opts = [
    cfg.StrOpt('default_swift_reference',
               default="ref1",
               help=_('The reference to the default swift account/backing '
                      'store parameters to use for adding new images.')),
    cfg.StrOpt('swift_store_auth_address',
               help=_('The address where the Swift authentication service '
                      'is listening.(deprecated)')),
    cfg.StrOpt('swift_store_user', secret=True,
               help=_('The user to authenticate against the Swift '
                      'authentication service (deprecated)')),
    cfg.StrOpt('swift_store_key', secret=True,
               help=_('Auth key for the user authenticating against the '
                      'Swift authentication service. (deprecated)')),
    cfg.StrOpt('swift_store_config_file', secret=True,
               help=_('The config file that has the swift account(s)'
                      'configs.')),
]

# NOTE(review): SafeConfigParser is deprecated in Python 3 in favor of
# ConfigParser — consider migrating; verify six.moves mapping first.
# The parser instance is module-global and shared by all SwiftParams.
CONFIG = configparser.SafeConfigParser()
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(swift_opts)


def is_multiple_swift_store_accounts_enabled():
    # Multiple accounts are considered enabled whenever a
    # swift_store_config_file has been configured.
    if CONF.swift_store_config_file is None:
        return False
    return True


class SwiftParams(object):
    # Loads swift account parameters either from the multi-account config
    # file or from the legacy single-account CONF options.

    def __init__(self):
        if is_multiple_swift_store_accounts_enabled():
            self.params = self._load_config()
        else:
            self.params = self._form_default_params()

    def _form_default_params(self):
        """Build a single-reference params dict from the legacy options.

        Returns {} unless all three of user/key/auth_address are set.
        """
        default = {}
        if (CONF.swift_store_user and CONF.swift_store_key
           and CONF.swift_store_auth_address):
            default['user'] = CONF.swift_store_user
            default['key'] = CONF.swift_store_key
            default['auth_address'] = CONF.swift_store_auth_address
            return {CONF.default_swift_reference: default}
        return {}

    def _load_config(self):
        """Read the swift store config file into {reference: params}.

        :raises exception.InvalidSwiftStoreConfiguration: when the config
            file cannot be located or parsed
        """
        try:
            conf_file = CONF.find_file(CONF.swift_store_config_file)
            CONFIG.read(conf_file)
        except Exception as e:
            msg = (_LE("swift config file %(conf_file)s:%(exc)s not found")
                   % {'conf_file': CONF.swift_store_config_file,
                      'exc': e})
            LOG.error(msg)
            raise exception.InvalidSwiftStoreConfiguration()
        account_params = {}
        account_references = CONFIG.sections()

        for ref in account_references:
            reference = {}
            try:
                reference['auth_address'] = CONFIG.get(ref, 'auth_address')
                reference['user'] = CONFIG.get(ref, 'user')
                reference['key'] = CONFIG.get(ref, 'key')
                account_params[ref] = reference
            except (ValueError, SyntaxError, configparser.NoOptionError) as e:
                # NOTE(review): a malformed reference is logged and silently
                # skipped rather than failing the whole load — confirm this
                # best-effort behavior is intended.
                LOG.exception(_LE("Invalid format of swift store config "
                                  "cfg"))
        return account_params
glance-12.0.0/glance/common/wsme_utils.py0000664000567000056710000000442412701407047021460 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
from datetime import datetime from wsme import types as wsme_types from glance.common import timeutils class WSMEModelTransformer(object): def to_dict(self): # Return the wsme_attributes names:values as a dict my_dict = {} for attribute in self._wsme_attributes: value = getattr(self, attribute.name) if value is not wsme_types.Unset: my_dict.update({attribute.name: value}) return my_dict @classmethod def to_wsme_model(model, db_entity, self_link=None, schema=None): # Return the wsme_attributes names:values as a dict names = [] for attribute in model._wsme_attributes: names.append(attribute.name) values = {} for name in names: value = getattr(db_entity, name, None) if value is not None: if type(value) == datetime: iso_datetime_value = timeutils.isotime(value) values.update({name: iso_datetime_value}) else: values.update({name: value}) if schema: values['schema'] = schema model_object = model(**values) # 'self' kwarg is used in wsme.types.Base.__init__(self, ..) and # conflicts during initialization. self_link is a proxy field to self. if self_link: model_object.self = self_link return model_object @classmethod def get_mandatory_attrs(cls): return [attr.name for attr in cls._wsme_attributes if attr.mandatory] def _get_value(obj): if obj is not wsme_types.Unset: return obj else: return None glance-12.0.0/glance/common/scripts/0000775000567000056710000000000012701407204020371 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/scripts/utils.py0000664000567000056710000001073212701407047022113 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'get_task', 'unpack_task_input', 'set_base_image_properties', 'validate_location_uri', 'get_image_data_iter', ] from oslo_log import log as logging from six.moves import urllib from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) def get_task(task_repo, task_id): """Gets a TaskProxy object. :param task_repo: TaskRepo object used to perform DB operations :param task_id: ID of the Task """ task = None try: task = task_repo.get(task_id) except exception.NotFound: msg = _LE('Task not found for task_id %s') % task_id LOG.exception(msg) return task def unpack_task_input(task): """Verifies and returns valid task input dictionary. :param task: Task domain object """ task_input = task.task_input # NOTE: until we support multiple task types, we just check for # input fields related to 'import task'. for key in ["import_from", "import_from_format", "image_properties"]: if key not in task_input: msg = _("Input does not contain '%(key)s' field") % {"key": key} raise exception.Invalid(msg) return task_input def set_base_image_properties(properties=None): """Sets optional base properties for creating Image. :param properties: Input dict to set some base properties """ if isinstance(properties, dict) and len(properties) == 0: # TODO(nikhil): We can make these properties configurable while # implementing the pipeline logic for the scripts. The below shown # are placeholders to show that the scripts work on 'devstack' # environment. 
properties['disk_format'] = 'qcow2' properties['container_format'] = 'bare' def validate_location_uri(location): """Validate location uri into acceptable format. :param location: Location uri to be validated """ if not location: raise exception.BadStoreUri(_('Invalid location: %s') % location) elif location.startswith(('http://', 'https://')): return location # NOTE: file type uri is being avoided for security reasons, # see LP bug #942118 #1400966. elif location.startswith(("file:///", "filesystem:///")): msg = _("File based imports are not allowed. Please use a non-local " "source of image data.") # NOTE: raise BadStoreUri and let the encompassing block save the error # msg in the task.message. raise exception.BadStoreUri(msg) else: # TODO(nikhil): add other supported uris supported = ['http', ] msg = _("The given uri is not valid. Please specify a " "valid uri from the following list of supported uri " "%(supported)s") % {'supported': supported} raise urllib.error.URLError(msg) def get_image_data_iter(uri): """Returns iterable object either for local file or uri :param uri: uri (remote or local) to the datasource we want to iterate Validation/sanitization of the uri is expected to happen before we get here. """ # NOTE(flaper87): This is safe because the input uri is already # verified before the task is created. if uri.startswith("file://"): uri = uri.split("file://")[-1] # NOTE(flaper87): The caller of this function expects to have # an iterable object. FileObjects in python are iterable, therefore # we are returning it as is. # The file descriptor will be eventually cleaned up by the garbage # collector once its ref-count is dropped to 0. That is, when there # wont be any references pointing to this file. # # We're not using StringIO or other tools to avoid reading everything # into memory. Some images may be quite heavy. 
return open(uri, "r") return urllib.request.urlopen(uri) glance-12.0.0/glance/common/scripts/__init__.py0000664000567000056710000000410712701407047022511 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from glance.common.scripts.image_import import main as image_import from glance.i18n import _LE, _LI LOG = logging.getLogger(__name__) def run_task(task_id, task_type, context, task_repo=None, image_repo=None, image_factory=None): # TODO(nikhil): if task_repo is None get new task repo # TODO(nikhil): if image_repo is None get new image repo # TODO(nikhil): if image_factory is None get new image factory LOG.info(_LI("Loading known task scripts for task_id %(task_id)s " "of type %(task_type)s"), {'task_id': task_id, 'task_type': task_type}) if task_type == 'import': image_import.run(task_id, context, task_repo, image_repo, image_factory) else: msg = _LE("This task type %(task_type)s is not supported by the " "current deployment of Glance. 
Please refer the " "documentation provided by OpenStack or your operator " "for more information.") % {'task_type': task_type} LOG.error(msg) task = task_repo.get(task_id) task.fail(msg) if task_repo: task_repo.save(task) else: LOG.error(_LE("Failed to save task %(task_id)s in DB as task_repo " "is %(task_repo)s"), {"task_id": task_id, "task_repo": task_repo}) glance-12.0.0/glance/common/scripts/image_import/0000775000567000056710000000000012701407204023045 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/scripts/image_import/__init__.py0000664000567000056710000000000012701407047025151 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/scripts/image_import/main.py0000664000567000056710000001442112701407047024352 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
__all__ = [ 'run', ] from oslo_concurrency import lockutils from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import six from glance.api.v2 import images as v2_api from glance.common import exception from glance.common.scripts import utils as script_utils from glance.common import store_utils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) def run(t_id, context, task_repo, image_repo, image_factory): LOG.info(_LI('Task %(task_id)s beginning import ' 'execution.'), {'task_id': t_id}) _execute(t_id, task_repo, image_repo, image_factory) # NOTE(nikhil): This lock prevents more than N number of threads to be spawn # simultaneously. The number N represents the number of threads in the # executor pool. The value is set to 10 in the eventlet executor. @lockutils.synchronized("glance_import") def _execute(t_id, task_repo, image_repo, image_factory): task = script_utils.get_task(task_repo, t_id) if task is None: # NOTE: This happens if task is not found in the database. In # such cases, there is no way to update the task status so, # it's ignored here. return try: task_input = script_utils.unpack_task_input(task) uri = script_utils.validate_location_uri(task_input.get('import_from')) image_id = import_image(image_repo, image_factory, task_input, t_id, uri) task.succeed({'image_id': image_id}) except Exception as e: # Note: The message string contains Error in it to indicate # in the task.message that it's a error message for the user. 
# TODO(nikhil): need to bring back save_and_reraise_exception when # necessary err_msg = ("Error: " + six.text_type(type(e)) + ': ' + encodeutils.exception_to_unicode(e)) log_msg = _LE(err_msg + ("Task ID %s" % task.task_id)) # noqa LOG.exception(log_msg) task.fail(_LE(err_msg)) # noqa finally: task_repo.save(task) def import_image(image_repo, image_factory, task_input, task_id, uri): original_image = create_image(image_repo, image_factory, task_input.get('image_properties'), task_id) # NOTE: set image status to saving just before setting data original_image.status = 'saving' image_repo.save(original_image) image_id = original_image.image_id # NOTE: Retrieving image from the database because the Image object # returned from create_image method does not have appropriate factories # wrapped around it. new_image = image_repo.get(image_id) set_image_data(new_image, uri, task_id) try: # NOTE: Check if the Image is not deleted after setting the data # before saving the active image. Here if image status is # saving, then new_image is saved as it contains updated location, # size, virtual_size and checksum information and the status of # new_image is already set to active in set_image_data() call. 
image = image_repo.get(image_id) if image.status == 'saving': image_repo.save(new_image) return image_id else: msg = _("The Image %(image_id)s object being created by this task " "%(task_id)s, is no longer in valid status for further " "processing.") % {"image_id": image_id, "task_id": task_id} raise exception.Conflict(msg) except (exception.Conflict, exception.NotFound, exception.NotAuthenticated): with excutils.save_and_reraise_exception(): if new_image.locations: for location in new_image.locations: store_utils.delete_image_location_from_backend( new_image.context, image_id, location) def create_image(image_repo, image_factory, image_properties, task_id): _base_properties = [] for k, v in v2_api.get_base_properties().items(): _base_properties.append(k) properties = {} # NOTE: get the base properties for key in _base_properties: try: properties[key] = image_properties.pop(key) except KeyError: LOG.debug("Task ID %(task_id)s: Ignoring property %(k)s for " "setting base properties while creating " "Image.", {'task_id': task_id, 'k': key}) # NOTE: get the rest of the properties and pass them as # extra_properties for Image to be created with them. 
properties['extra_properties'] = image_properties script_utils.set_base_image_properties(properties=properties) image = image_factory.new_image(**properties) image_repo.add(image) return image def set_image_data(image, uri, task_id): data_iter = None try: LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " "imported"), {"data_uri": uri, "task_id": task_id}) data_iter = script_utils.get_image_data_iter(uri) image.set_data(data_iter) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warn(_LW("Task %(task_id)s failed with exception %(error)s") % {"error": encodeutils.exception_to_unicode(e), "task_id": task_id}) LOG.info(_LI("Task %(task_id)s: Could not import image file" " %(image_data)s"), {"image_data": uri, "task_id": task_id}) finally: if hasattr(data_iter, 'close'): data_iter.close() glance-12.0.0/glance/common/semver_db.py0000664000567000056710000001376112701407047021237 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import semantic_version from sqlalchemy.orm.properties import CompositeProperty from sqlalchemy import sql from glance.common import exception from glance.i18n import _ MAX_COMPONENT_LENGTH = pow(2, 16) - 1 MAX_NUMERIC_PRERELEASE_LENGTH = 6 class DBVersion(object): def __init__(self, components_long, prerelease, build): """ Creates a DBVersion object out of 3 component fields. 
This initializer is supposed to be called from SQLAlchemy if 3 database columns are mapped to this composite field. :param components_long: a 64-bit long value, containing numeric components of the version :param prerelease: a prerelease label of the version, optionally preformatted with leading zeroes in numeric-only parts of the label :param build: a build label of the version """ version_string = '%s.%s.%s' % _long_to_components(components_long) if prerelease: version_string += '-' + _strip_leading_zeroes_from_prerelease( prerelease) if build: version_string += '+' + build self.version = semantic_version.Version(version_string) def __repr__(self): return str(self.version) def __eq__(self, other): return (isinstance(other, DBVersion) and other.version == self.version) def __ne__(self, other): return (not isinstance(other, DBVersion) or self.version != other.version) def __composite_values__(self): long_version = _version_to_long(self.version) prerelease = _add_leading_zeroes_to_prerelease(self.version.prerelease) build = '.'.join(self.version.build) if self.version.build else None return long_version, prerelease, build def parse(version_string): version = semantic_version.Version.coerce(version_string) return DBVersion(_version_to_long(version), '.'.join(version.prerelease), '.'.join(version.build)) def _check_limit(value): if value > MAX_COMPONENT_LENGTH: reason = _("Version component is too " "large (%d max)") % MAX_COMPONENT_LENGTH raise exception.InvalidVersion(reason=reason) def _version_to_long(version): """ Converts the numeric part of the semver version into the 64-bit long value using the following logic: * major version is stored in first 16 bits of the value * minor version is stored in next 16 bits * patch version is stored in following 16 bits * next 2 bits are used to store the flag: if the version has pre-release label then these bits are 00, otherwise they are 11. Intermediate values of the flag (01 and 10) are reserved for future usage. 
* last 14 bits of the value are reserved fo future usage The numeric components of version are checked so their value do not exceed 16 bits. :param version: a semantic_version.Version object """ _check_limit(version.major) _check_limit(version.minor) _check_limit(version.patch) major = version.major << 48 minor = version.minor << 32 patch = version.patch << 16 flag = 0 if version.prerelease else 2 flag <<= 14 return major | minor | patch | flag def _long_to_components(value): major = value >> 48 minor = (value - (major << 48)) >> 32 patch = (value - (major << 48) - (minor << 32)) >> 16 return str(major), str(minor), str(patch) def _add_leading_zeroes_to_prerelease(label_tuple): if label_tuple is None: return None res = [] for component in label_tuple: if component.isdigit(): if len(component) > MAX_NUMERIC_PRERELEASE_LENGTH: reason = _("Prerelease numeric component is too large " "(%d characters " "max)") % MAX_NUMERIC_PRERELEASE_LENGTH raise exception.InvalidVersion(reason=reason) res.append(component.rjust(MAX_NUMERIC_PRERELEASE_LENGTH, '0')) else: res.append(component) return '.'.join(res) def _strip_leading_zeroes_from_prerelease(string_value): res = [] for component in string_value.split('.'): if component.isdigit(): val = component.lstrip('0') if len(val) == 0: # Corner case: when the component is just '0' val = '0' # it will be stripped completely, so restore it res.append(val) else: res.append(component) return '.'.join(res) strict_op_map = { operator.ge: operator.gt, operator.le: operator.lt } class VersionComparator(CompositeProperty.Comparator): def _get_comparison(self, values, op): columns = self.__clause_element__().clauses if op in strict_op_map: stricter_op = strict_op_map[op] else: stricter_op = op return sql.or_(stricter_op(columns[0], values[0]), sql.and_(columns[0] == values[0], op(columns[1], values[1]))) def __gt__(self, other): return self._get_comparison(other.__composite_values__(), operator.gt) def __ge__(self, other): return 
self._get_comparison(other.__composite_values__(), operator.ge) def __lt__(self, other): return self._get_comparison(other.__composite_values__(), operator.lt) def __le__(self, other): return self._get_comparison(other.__composite_values__(), operator.le) glance-12.0.0/glance/common/exception.py0000664000567000056710000004224312701407047021264 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Glance exception subclasses""" import six import six.moves.urllib.parse as urlparse from glance.i18n import _ _FATAL_EXCEPTION_FORMAT_ERRORS = False class RedirectException(Exception): def __init__(self, url): self.url = urlparse.urlparse(url) class GlanceException(Exception): """ Base Glance Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred") def __init__(self, message=None, *args, **kwargs): if not message: message = self.message try: if kwargs: message = message % kwargs except Exception: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise else: # at least get the core message out if something happened pass self.msg = message super(GlanceException, self).__init__(message) def __unicode__(self): # NOTE(flwang): By default, self.msg is an instance of Message, which # can't be converted by str(). Based on the definition of # __unicode__, it should return unicode always. return six.text_type(self.msg) class MissingCredentialError(GlanceException): message = _("Missing required credential: %(required)s") class BadAuthStrategy(GlanceException): message = _("Incorrect auth strategy, expected \"%(expected)s\" but " "received \"%(received)s\"") class NotFound(GlanceException): message = _("An object with the specified identifier was not found.") class BadStoreUri(GlanceException): message = _("The Store URI was malformed.") class Duplicate(GlanceException): message = _("An object with the same identifier already exists.") class Conflict(GlanceException): message = _("An object with the same identifier is currently being " "operated on.") class StorageQuotaFull(GlanceException): message = _("The size of the data %(image_size)s will exceed the limit. 
" "%(remaining)s bytes remaining.") class AuthBadRequest(GlanceException): message = _("Connect error/bad request to Auth service at URL %(url)s.") class AuthUrlNotFound(GlanceException): message = _("Auth service at URL %(url)s not found.") class AuthorizationFailure(GlanceException): message = _("Authorization failed.") class NotAuthenticated(GlanceException): message = _("You are not authenticated.") class UploadException(GlanceException): message = _('Image upload problem: %s') class Forbidden(GlanceException): message = _("You are not authorized to complete this action.") class ForbiddenPublicImage(Forbidden): message = _("You are not authorized to complete this action.") class ProtectedImageDelete(Forbidden): message = _("Image %(image_id)s is protected and cannot be deleted.") class ProtectedMetadefNamespaceDelete(Forbidden): message = _("Metadata definition namespace %(namespace)s is protected" " and cannot be deleted.") class ProtectedMetadefNamespacePropDelete(Forbidden): message = _("Metadata definition property %(property_name)s is protected" " and cannot be deleted.") class ProtectedMetadefObjectDelete(Forbidden): message = _("Metadata definition object %(object_name)s is protected" " and cannot be deleted.") class ProtectedMetadefResourceTypeAssociationDelete(Forbidden): message = _("Metadata definition resource-type-association" " %(resource_type)s is protected and cannot be deleted.") class ProtectedMetadefResourceTypeSystemDelete(Forbidden): message = _("Metadata definition resource-type %(resource_type_name)s is" " a seeded-system type and cannot be deleted.") class ProtectedMetadefTagDelete(Forbidden): message = _("Metadata definition tag %(tag_name)s is protected" " and cannot be deleted.") class Invalid(GlanceException): message = _("Data supplied was not valid.") class InvalidSortKey(Invalid): message = _("Sort key supplied was not valid.") class InvalidSortDir(Invalid): message = _("Sort direction supplied was not valid.") class 
InvalidPropertyProtectionConfiguration(Invalid): message = _("Invalid configuration in property protection file.") class InvalidSwiftStoreConfiguration(Invalid): message = _("Invalid configuration in glance-swift conf file.") class InvalidFilterOperatorValue(Invalid): message = _("Unable to filter using the specified operator.") class InvalidFilterRangeValue(Invalid): message = _("Unable to filter using the specified range.") class InvalidOptionValue(Invalid): message = _("Invalid value for option %(option)s: %(value)s") class ReadonlyProperty(Forbidden): message = _("Attribute '%(property)s' is read-only.") class ReservedProperty(Forbidden): message = _("Attribute '%(property)s' is reserved.") class AuthorizationRedirect(GlanceException): message = _("Redirecting to %(uri)s for authorization.") class ClientConnectionError(GlanceException): message = _("There was an error connecting to a server") class ClientConfigurationError(GlanceException): message = _("There was an error configuring the client.") class MultipleChoices(GlanceException): message = _("The request returned a 302 Multiple Choices. This generally " "means that you have not included a version indicator in a " "request URI.\n\nThe body of response returned:\n%(body)s") class LimitExceeded(GlanceException): message = _("The request returned a 413 Request Entity Too Large. This " "generally means that rate limiting or a quota threshold was " "breached.\n\nThe response body:\n%(body)s") def __init__(self, *args, **kwargs): self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') else None) super(LimitExceeded, self).__init__(*args, **kwargs) class ServiceUnavailable(GlanceException): message = _("The request returned 503 Service Unavailable. 
This " "generally occurs on service overload or other transient " "outage.") def __init__(self, *args, **kwargs): self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') else None) super(ServiceUnavailable, self).__init__(*args, **kwargs) class ServerError(GlanceException): message = _("The request returned 500 Internal Server Error.") class UnexpectedStatus(GlanceException): message = _("The request returned an unexpected status: %(status)s." "\n\nThe response body:\n%(body)s") class InvalidContentType(GlanceException): message = _("Invalid content type %(content_type)s") class BadRegistryConnectionConfiguration(GlanceException): message = _("Registry was not configured correctly on API server. " "Reason: %(reason)s") class BadDriverConfiguration(GlanceException): message = _("Driver %(driver_name)s could not be configured correctly. " "Reason: %(reason)s") class MaxRedirectsExceeded(GlanceException): message = _("Maximum redirects (%(redirects)s) was exceeded.") class InvalidRedirect(GlanceException): message = _("Received invalid HTTP redirect.") class NoServiceEndpoint(GlanceException): message = _("Response from Keystone does not contain a Glance endpoint.") class RegionAmbiguity(GlanceException): message = _("Multiple 'image' service matches for region %(region)s. This " "generally means that a region is required and you have not " "supplied one.") class WorkerCreationFailure(GlanceException): message = _("Server worker creation failed: %(reason)s.") class SchemaLoadError(GlanceException): message = _("Unable to load schema: %(reason)s") class InvalidObject(GlanceException): message = _("Provided object does not match schema " "'%(schema)s': %(reason)s") class ImageSizeLimitExceeded(GlanceException): message = _("The provided image is too large.") class ImageMemberLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "members for this image. 
Attempted: %(attempted)s, " "Maximum: %(maximum)s") class ImagePropertyLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") class ImageTagLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") class ImageLocationLimitExceeded(LimitExceeded): message = _("The limit has been exceeded on the number of allowed image " "locations. Attempted: %(attempted)s, Maximum: %(maximum)s") class SIGHUPInterrupt(GlanceException): message = _("System SIGHUP signal received.") class RPCError(GlanceException): message = _("%(cls)s exception was raised in the last rpc call: %(val)s") class TaskException(GlanceException): message = _("An unknown task exception occurred") class BadTaskConfiguration(GlanceException): message = _("Task was not configured properly") class ImageNotFound(NotFound): message = _("Image with the given id %(image_id)s was not found") class TaskNotFound(TaskException, NotFound): message = _("Task with the given id %(task_id)s was not found") class InvalidTaskStatus(TaskException, Invalid): message = _("Provided status of task is unsupported: %(status)s") class InvalidTaskType(TaskException, Invalid): message = _("Provided type of task is unsupported: %(type)s") class InvalidTaskStatusTransition(TaskException, Invalid): message = _("Status transition from %(cur_status)s to" " %(new_status)s is not allowed") class ImportTaskError(TaskException, Invalid): message = _("An import task exception occurred") class DuplicateLocation(Duplicate): message = _("The location %(location)s already exists") class InvalidParameterValue(Invalid): message = _("Invalid value '%(value)s' for parameter '%(param)s': " "%(extra_msg)s") class InvalidImageStatusTransition(Invalid): message = _("Image status transition from %(cur_status)s to" " %(new_status)s is not allowed") 
class MetadefDuplicateNamespace(Duplicate): message = _("The metadata definition namespace=%(namespace_name)s" " already exists.") class MetadefDuplicateObject(Duplicate): message = _("A metadata definition object with name=%(object_name)s" " already exists in namespace=%(namespace_name)s.") class MetadefDuplicateProperty(Duplicate): message = _("A metadata definition property with name=%(property_name)s" " already exists in namespace=%(namespace_name)s.") class MetadefDuplicateResourceType(Duplicate): message = _("A metadata definition resource-type with" " name=%(resource_type_name)s already exists.") class MetadefDuplicateResourceTypeAssociation(Duplicate): message = _("The metadata definition resource-type association of" " resource-type=%(resource_type_name)s to" " namespace=%(namespace_name)s" " already exists.") class MetadefDuplicateTag(Duplicate): message = _("A metadata tag with name=%(name)s" " already exists in namespace=%(namespace_name)s.") class MetadefForbidden(Forbidden): message = _("You are not authorized to complete this action.") class MetadefIntegrityError(Forbidden): message = _("The metadata definition %(record_type)s with" " name=%(record_name)s not deleted." 
" Other records still refer to it.") class MetadefNamespaceNotFound(NotFound): message = _("Metadata definition namespace=%(namespace_name)s" "was not found.") class MetadefObjectNotFound(NotFound): message = _("The metadata definition object with" " name=%(object_name)s was not found in" " namespace=%(namespace_name)s.") class MetadefPropertyNotFound(NotFound): message = _("The metadata definition property with" " name=%(property_name)s was not found in" " namespace=%(namespace_name)s.") class MetadefResourceTypeNotFound(NotFound): message = _("The metadata definition resource-type with" " name=%(resource_type_name)s, was not found.") class MetadefResourceTypeAssociationNotFound(NotFound): message = _("The metadata definition resource-type association of" " resource-type=%(resource_type_name)s to" " namespace=%(namespace_name)s," " was not found.") class MetadefTagNotFound(NotFound): message = _("The metadata definition tag with" " name=%(name)s was not found in" " namespace=%(namespace_name)s.") class SignatureVerificationError(GlanceException): message = _("Unable to verify signature: %(reason)s") class InvalidVersion(Invalid): message = _("Version is invalid: %(reason)s") class InvalidArtifactTypePropertyDefinition(Invalid): message = _("Invalid property definition") class InvalidArtifactTypeDefinition(Invalid): message = _("Invalid type definition") class InvalidArtifactPropertyValue(Invalid): message = _("Property '%(name)s' may not have value '%(val)s': %(msg)s") def __init__(self, message=None, *args, **kwargs): super(InvalidArtifactPropertyValue, self).__init__(message, *args, **kwargs) self.name = kwargs.get('name') self.value = kwargs.get('val') class ArtifactNotFound(NotFound): message = _("Artifact with id=%(id)s was not found") class ArtifactForbidden(Forbidden): message = _("Artifact with id=%(id)s is not accessible") class ArtifactDuplicateNameTypeVersion(Duplicate): message = _("Artifact with the specified type, name and version" " already exists") 
class InvalidArtifactStateTransition(Invalid): message = _("Artifact cannot change state from %(source)s to %(target)s") class ArtifactDuplicateDirectDependency(Duplicate): message = _("Artifact with the specified type, name and version" " already has the direct dependency=%(dep)s") class ArtifactDuplicateTransitiveDependency(Duplicate): message = _("Artifact with the specified type, name and version" " already has the transitive dependency=%(dep)s") class ArtifactCircularDependency(Invalid): message = _("Artifact with a circular dependency can not be created") class ArtifactUnsupportedPropertyOperator(Invalid): message = _("Operator %(op)s is not supported") class ArtifactUnsupportedShowLevel(Invalid): message = _("Show level %(shl)s is not supported in this operation") class ArtifactPropertyValueNotFound(NotFound): message = _("Property's %(prop)s value has not been found") class ArtifactInvalidProperty(Invalid): message = _("Artifact has no property %(prop)s") class ArtifactInvalidPropertyParameter(Invalid): message = _("Cannot use this parameter with the operator %(op)s") class ArtifactLoadError(GlanceException): message = _("Cannot load artifact '%(name)s'") class ArtifactNonMatchingTypeName(ArtifactLoadError): message = _( "Plugin name '%(plugin)s' should match artifact typename '%(name)s'") class ArtifactPluginNotFound(NotFound): message = _("No plugin for '%(name)s' has been loaded") class UnknownArtifactType(NotFound): message = _("Artifact type with name '%(name)s' and version '%(version)s' " "is not known") class ArtifactInvalidStateTransition(Invalid): message = _("Artifact state cannot be changed from %(curr)s to %(to)s") class JsonPatchException(GlanceException): message = _("Invalid jsonpatch request") class InvalidJsonPatchBody(JsonPatchException): message = _("The provided body %(body)s is invalid " "under given schema: %(schema)s") class InvalidJsonPatchPath(JsonPatchException): message = _("The provided path '%(path)s' is invalid: 
%(explanation)s") def __init__(self, message=None, *args, **kwargs): self.explanation = kwargs.get("explanation") super(InvalidJsonPatchPath, self).__init__(message, *args, **kwargs) glance-12.0.0/glance/common/property_utils.py0000664000567000056710000001776412701407047022404 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from six.moves import configparser import glance.api.policy from glance.common import exception from glance.i18n import _, _LE CONFIG = configparser.SafeConfigParser() LOG = logging.getLogger(__name__) property_opts = [ cfg.StrOpt('property_protection_file', help=_('The location of the property protection file.' 'This file contains the rules for property protections ' 'and the roles/policies associated with it. If this ' 'config value is not specified, by default, property ' 'protections won\'t be enforced. 
If a value is ' 'specified and the file is not found, then the ' 'glance-api service will not start.')), cfg.StrOpt('property_protection_rule_format', default='roles', choices=('roles', 'policies'), help=_('This config value indicates whether "roles" or ' '"policies" are used in the property protection file.')), ] CONF = cfg.CONF CONF.register_opts(property_opts) # NOTE (spredzy): Due to the particularly lengthy name of the exception # and the number of occurrence it is raise in this file, a variable is # created InvalidPropProtectConf = exception.InvalidPropertyProtectionConfiguration def is_property_protection_enabled(): if CONF.property_protection_file: return True return False class PropertyRules(object): def __init__(self, policy_enforcer=None): self.rules = [] self.prop_exp_mapping = {} self.policies = [] self.policy_enforcer = policy_enforcer or glance.api.policy.Enforcer() self.prop_prot_rule_format = CONF.property_protection_rule_format self.prop_prot_rule_format = self.prop_prot_rule_format.lower() self._load_rules() def _load_rules(self): try: conf_file = CONF.find_file(CONF.property_protection_file) CONFIG.read(conf_file) except Exception as e: msg = (_LE("Couldn't find property protection file %(file)s: " "%(error)s.") % {'file': CONF.property_protection_file, 'error': e}) LOG.error(msg) raise InvalidPropProtectConf() if self.prop_prot_rule_format not in ['policies', 'roles']: msg = _LE("Invalid value '%s' for " "'property_protection_rule_format'. 
" "The permitted values are " "'roles' and 'policies'") % self.prop_prot_rule_format LOG.error(msg) raise InvalidPropProtectConf() operations = ['create', 'read', 'update', 'delete'] properties = CONFIG.sections() for property_exp in properties: property_dict = {} compiled_rule = self._compile_rule(property_exp) for operation in operations: permissions = CONFIG.get(property_exp, operation) if permissions: if self.prop_prot_rule_format == 'policies': if ',' in permissions: LOG.error( _LE("Multiple policies '%s' not allowed " "for a given operation. Policies can be " "combined in the policy file"), permissions) raise InvalidPropProtectConf() self.prop_exp_mapping[compiled_rule] = property_exp self._add_policy_rules(property_exp, operation, permissions) permissions = [permissions] else: permissions = [permission.strip() for permission in permissions.split(',')] if '@' in permissions and '!' in permissions: msg = (_LE( "Malformed property protection rule in " "[%(prop)s] %(op)s=%(perm)s: '@' and '!' " "are mutually exclusive") % dict(prop=property_exp, op=operation, perm=permissions)) LOG.error(msg) raise InvalidPropProtectConf() property_dict[operation] = permissions else: property_dict[operation] = [] LOG.warn( _('Property protection on operation %(operation)s' ' for rule %(rule)s is not found. No role will be' ' allowed to perform this operation.') % {'operation': operation, 'rule': property_exp}) self.rules.append((compiled_rule, property_dict)) def _compile_rule(self, rule): try: return re.compile(rule) except Exception as e: msg = (_LE("Encountered a malformed property protection rule" " %(rule)s: %(error)s.") % {'rule': rule, 'error': e}) LOG.error(msg) raise InvalidPropProtectConf() def _add_policy_rules(self, property_exp, action, rule): """Add policy rules to the policy enforcer. 
For example, if the file listed as property_protection_file has: [prop_a] create = glance_creator then the corresponding policy rule would be: "prop_a:create": "rule:glance_creator" where glance_creator is defined in policy.json. For example: "glance_creator": "role:admin or role:glance_create_user" """ rule = "rule:%s" % rule rule_name = "%s:%s" % (property_exp, action) rule_dict = policy.Rules.from_dict({ rule_name: rule }) self.policy_enforcer.add_rules(rule_dict) def _check_policy(self, property_exp, action, context): try: action = ":".join([property_exp, action]) self.policy_enforcer.enforce(context, action, {}) except exception.Forbidden: return False return True def check_property_rules(self, property_name, action, context): roles = context.roles if not self.rules: return True if action not in ['create', 'read', 'update', 'delete']: return False for rule_exp, rule in self.rules: if rule_exp.search(str(property_name)): break else: # no matching rules return False rule_roles = rule.get(action) if rule_roles: if '!' in rule_roles: return False elif '@' in rule_roles: return True if self.prop_prot_rule_format == 'policies': prop_exp_key = self.prop_exp_mapping[rule_exp] return self._check_policy(prop_exp_key, action, context) if set(roles).intersection(set([role.lower() for role in rule_roles])): return True return False glance-12.0.0/glance/common/timeutils.py0000664000567000056710000000546012701407047021305 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Time related utilities and helper functions. """ import datetime import iso8601 from monotonic import monotonic as now # noqa from oslo_utils import encodeutils # ISO 8601 extended time format with microseconds _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND def isotime(at=None, subsecond=False): """Stringify time in ISO 8601 format.""" if not at: at = utcnow() st = at.strftime(_ISO8601_TIME_FORMAT if not subsecond else _ISO8601_TIME_FORMAT_SUBSECOND) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' st += ('Z' if tz == 'UTC' else tz) return st def parse_isotime(timestr): """Parse time from ISO 8601 format.""" try: return iso8601.parse_date(timestr) except iso8601.ParseError as e: raise ValueError(encodeutils.exception_to_unicode(e)) except TypeError as e: raise ValueError(encodeutils.exception_to_unicode(e)) def utcnow(with_timezone=False): """Overridable version of utils.utcnow that can return a TZ-aware datetime. """ if utcnow.override_time: try: return utcnow.override_time.pop(0) except AttributeError: return utcnow.override_time if with_timezone: return datetime.datetime.now(tz=iso8601.iso8601.UTC) return datetime.datetime.utcnow() def normalize_time(timestamp): """Normalize time in arbitrary timezone to UTC naive object.""" offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset def iso8601_from_timestamp(timestamp, microsecond=False): """Returns an iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp), microsecond) utcnow.override_time = None def delta_seconds(before, after): """Return the difference between two timing objects. Compute the difference in seconds between two date, time, or datetime objects (as a float, to microsecond resolution). 
""" delta = after - before return datetime.timedelta.total_seconds(delta) glance-12.0.0/glance/common/signature_utils.py0000664000567000056710000004547012701407047022514 0ustar jenkinsjenkins00000000000000# Copyright (c) The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support signature verification.""" import binascii import datetime from castellan import key_manager from cryptography import exceptions as crypto_exception from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import dsa from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography import x509 import debtcollector from oslo_log import log as logging from oslo_serialization import base64 from oslo_utils import encodeutils from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) # Note: This is the signature hash method, which is independent from the # image data checksum hash method (which is handled elsewhere). 
HASH_METHODS = { 'SHA-224': hashes.SHA224(), 'SHA-256': hashes.SHA256(), 'SHA-384': hashes.SHA384(), 'SHA-512': hashes.SHA512() } # Currently supported signature key types # RSA Options RSA_PSS = 'RSA-PSS' # DSA Options DSA = 'DSA' # ECC curves -- note that only those with key sizes >=384 are included # Note also that some of these may not be supported by the cryptography backend ECC_CURVES = ( ec.SECT571K1(), ec.SECT409K1(), ec.SECT571R1(), ec.SECT409R1(), ec.SECP521R1(), ec.SECP384R1(), ) # These are the currently supported certificate formats (X_509,) = ( 'X.509', ) CERTIFICATE_FORMATS = { X_509 } # These are the currently supported MGF formats, used for RSA-PSS signatures MASK_GEN_ALGORITHMS = { 'MGF1': padding.MGF1 } # Required image property names (SIGNATURE, HASH_METHOD, KEY_TYPE, CERT_UUID) = ( 'img_signature', 'img_signature_hash_method', 'img_signature_key_type', 'img_signature_certificate_uuid' ) # TODO(bpoulos): remove when 'sign-the-hash' approach is no longer supported (OLD_SIGNATURE, OLD_HASH_METHOD, OLD_KEY_TYPE, OLD_CERT_UUID) = ( 'signature', 'signature_hash_method', 'signature_key_type', 'signature_certificate_uuid' ) # Optional image property names for RSA-PSS # TODO(bpoulos): remove when 'sign-the-hash' approach is no longer supported (MASK_GEN_ALG, PSS_SALT_LENGTH) = ( 'mask_gen_algorithm', 'pss_salt_length' ) class SignatureKeyType(object): _REGISTERED_TYPES = {} def __init__(self, name, public_key_type, create_verifier): self.name = name self.public_key_type = public_key_type self.create_verifier = create_verifier @classmethod def register(cls, name, public_key_type, create_verifier): """Register a signature key type. :param name: the name of the signature key type :param public_key_type: e.g. RSAPublicKey, DSAPublicKey, etc. :param create_verifier: a function to create a verifier for this type """ cls._REGISTERED_TYPES[name] = cls(name, public_key_type, create_verifier) @classmethod def lookup(cls, name): """Look up the signature key type. 
:param name: the name of the signature key type :returns: the SignatureKeyType object :raises: glance.common.exception.SignatureVerificationError if signature key type is invalid """ if name not in cls._REGISTERED_TYPES: raise exception.SignatureVerificationError( _('Invalid signature key type: %s') % name ) return cls._REGISTERED_TYPES[name] # each key type will require its own verifier def create_verifier_for_pss(signature, hash_method, public_key, image_properties): """Create the verifier to use when the key type is RSA-PSS. :param signature: the decoded signature to use :param hash_method: the hash method to use, as a cryptography object :param public_key: the public key to use, as a cryptography object :param image_properties: the key-value properties about the image :returns: the verifier to use to verify the signature for RSA-PSS :raises glance.common.exception.SignatureVerificationError: if the RSA-PSS specific properties are invalid """ # retrieve other needed properties, or use defaults if not there if MASK_GEN_ALG in image_properties: mask_gen_algorithm = image_properties[MASK_GEN_ALG] if mask_gen_algorithm not in MASK_GEN_ALGORITHMS: raise exception.SignatureVerificationError( _('Invalid mask_gen_algorithm: %s') % mask_gen_algorithm ) mgf = MASK_GEN_ALGORITHMS[mask_gen_algorithm](hash_method) else: # default to MGF1 mgf = padding.MGF1(hash_method) if PSS_SALT_LENGTH in image_properties: pss_salt_length = image_properties[PSS_SALT_LENGTH] try: salt_length = int(pss_salt_length) except ValueError: raise exception.SignatureVerificationError( _('Invalid pss_salt_length: %s') % pss_salt_length ) else: # default to max salt length salt_length = padding.PSS.MAX_LENGTH # return the verifier return public_key.verifier( signature, padding.PSS(mgf=mgf, salt_length=salt_length), hash_method ) def create_verifier_for_ecc(signature, hash_method, public_key, image_properties): """Create the verifier to use when the key type is ECC_*. 
:param signature: the decoded signature to use :param hash_method: the hash method to use, as a cryptography object :param public_key: the public key to use, as a cryptography object :param image_properties: the key-value properties about the image :return: the verifier to use to verify the signature for ECC_* """ # return the verifier return public_key.verifier( signature, ec.ECDSA(hash_method) ) def create_verifier_for_dsa(signature, hash_method, public_key, image_properties): """Create verifier to use when the key type is DSA :param signature: the decoded signature to use :param hash_method: the hash method to use, as a cryptography object :param public_key: the public key to use, as a cryptography object :param image_properties: the key-value properties about the image :returns: the verifier to use to verify the signature for DSA """ # return the verifier return public_key.verifier( signature, hash_method ) # map the key type to the verifier function to use SignatureKeyType.register(RSA_PSS, rsa.RSAPublicKey, create_verifier_for_pss) SignatureKeyType.register(DSA, dsa.DSAPublicKey, create_verifier_for_dsa) # Register the elliptic curves which are supported by the backend for curve in ECC_CURVES: if default_backend().elliptic_curve_supported(curve): SignatureKeyType.register('ECC_' + curve.name.upper(), ec.EllipticCurvePublicKey, create_verifier_for_ecc) def should_create_verifier(image_properties): """Determine whether a verifier should be created. Using the image properties, determine whether existing properties indicate that signature verification should be done. 
:param image_properties: the key-value properties about the image :return: True, if signature metadata properties exist, False otherwise """ return (image_properties is not None and CERT_UUID in image_properties and HASH_METHOD in image_properties and SIGNATURE in image_properties and KEY_TYPE in image_properties) def get_verifier(context, image_properties): """Retrieve the image properties and use them to create a verifier. :param context: the user context for authentication :param image_properties: the key-value properties about the image :return: instance of cryptography AsymmetricVerificationContext :raises glance.common.exception.SignatureVerificationError: if building the verifier fails """ if not should_create_verifier(image_properties): raise exception.SignatureVerificationError( _('Required image properties for signature verification do not' ' exist. Cannot verify signature.') ) signature = get_signature(image_properties[SIGNATURE]) hash_method = get_hash_method(image_properties[HASH_METHOD]) signature_key_type = SignatureKeyType.lookup( image_properties[KEY_TYPE]) public_key = get_public_key(context, image_properties[CERT_UUID], signature_key_type) # create the verifier based on the signature key type try: verifier = signature_key_type.create_verifier(signature, hash_method, public_key, image_properties) except crypto_exception.UnsupportedAlgorithm as e: msg = (_LE("Unable to create verifier since algorithm is " "unsupported: %(e)s") % {'e': encodeutils.exception_to_unicode(e)}) LOG.error(msg) raise exception.SignatureVerificationError( _('Unable to verify signature since the algorithm is unsupported ' 'on this system') ) if verifier: return verifier else: # Error creating the verifier raise exception.SignatureVerificationError( _('Error occurred while creating the verifier') ) @debtcollector.removals.remove(message="This will be removed in the N cycle.") def should_verify_signature(image_properties): """Determine whether a signature should be verified. 
Using the image properties, determine whether existing properties indicate that signature verification should be done. :param image_properties: the key-value properties about the image :returns: True, if signature metadata properties exist, False otherwise """ return (image_properties is not None and OLD_CERT_UUID in image_properties and OLD_HASH_METHOD in image_properties and OLD_SIGNATURE in image_properties and OLD_KEY_TYPE in image_properties) @debtcollector.removals.remove( message="Starting with the Mitaka release, this approach to signature " "verification using the image 'checksum' and signature metadata " "properties that do not start with 'img' will not be supported. " "This functionality will be removed in the N release. This " "approach is being replaced with a signature of the data " "directly, instead of a signature of the hash method, and the new " "approach uses properties that start with 'img_'.") def verify_signature(context, checksum_hash, image_properties): """Retrieve the image properties and use them to verify the signature. :param context: the user context for authentication :param checksum_hash: the 'checksum' hash of the image data :param image_properties: the key-value properties about the image :returns: True if verification succeeds :raises glance.common.exception.SignatureVerificationError: if verification fails """ if not should_verify_signature(image_properties): raise exception.SignatureVerificationError( _('Required image properties for signature verification do not' ' exist. 
Cannot verify signature.') ) checksum_hash = encodeutils.to_utf8(checksum_hash) signature = get_signature(image_properties[OLD_SIGNATURE]) hash_method = get_hash_method(image_properties[OLD_HASH_METHOD]) signature_key_type = SignatureKeyType.lookup( image_properties[OLD_KEY_TYPE]) public_key = get_public_key(context, image_properties[OLD_CERT_UUID], signature_key_type) # create the verifier based on the signature key type try: verifier = signature_key_type.create_verifier(signature, hash_method, public_key, image_properties) except crypto_exception.UnsupportedAlgorithm as e: msg = (_LE("Unable to create verifier since algorithm is " "unsupported: %(e)s") % {'e': encodeutils.exception_to_unicode(e)}) LOG.error(msg) raise exception.SignatureVerificationError( _('Unable to verify signature since the algorithm is unsupported ' 'on this system') ) if verifier: # Verify the signature verifier.update(checksum_hash) try: verifier.verify() return True except crypto_exception.InvalidSignature: raise exception.SignatureVerificationError( _('Signature verification failed.') ) else: # Error creating the verifier raise exception.SignatureVerificationError( _('Error occurred while verifying the signature') ) def get_signature(signature_data): """Decode the signature data and returns the signature. :param siganture_data: the base64-encoded signature data :returns: the decoded signature :raises glance.common.exception.SignatureVerificationError: if the signature data is malformatted """ try: signature = base64.decode_as_bytes(signature_data) except (TypeError, binascii.Error): raise exception.SignatureVerificationError( _('The signature data was not properly encoded using base64') ) return signature def get_hash_method(hash_method_name): """Verify the hash method name and create the hash method. 
:param hash_method_name: the name of the hash method to retrieve :returns: the hash method, a cryptography object :raises glance.common.exception.SignatureVerificationError: if the hash method name is invalid """ if hash_method_name not in HASH_METHODS: raise exception.SignatureVerificationError( _('Invalid signature hash method: %s') % hash_method_name ) return HASH_METHODS[hash_method_name] def get_public_key(context, signature_certificate_uuid, signature_key_type): """Create the public key object from a retrieved certificate. :param context: the user context for authentication :param signature_certificate_uuid: the uuid to use to retrieve the certificate :param signature_key_type: a SignatureKeyType object :returns: the public key cryptography object :raises glance.common.exception.SignatureVerificationError: if public key format is invalid """ certificate = get_certificate(context, signature_certificate_uuid) # Note that this public key could either be # RSAPublicKey, DSAPublicKey, or EllipticCurvePublicKey public_key = certificate.public_key() # Confirm the type is of the type expected based on the signature key type if not isinstance(public_key, signature_key_type.public_key_type): raise exception.SignatureVerificationError( _('Invalid public key type for signature key type: %s') % signature_key_type ) return public_key def get_certificate(context, signature_certificate_uuid): """Create the certificate object from the retrieved certificate data. 
:param context: the user context for authentication :param signature_certificate_uuid: the uuid to use to retrieve the certificate :returns: the certificate cryptography object :raises glance.common.exception.SignatureVerificationError: if the retrieval fails or the format is invalid """ keymgr_api = key_manager.API() try: # The certificate retrieved here is a castellan certificate object cert = keymgr_api.get(context, signature_certificate_uuid) except Exception as e: # The problem encountered may be backend-specific, since castellan # can use different backends. Rather than importing all possible # backends here, the generic "Exception" is used. msg = (_LE("Unable to retrieve certificate with ID %(id)s: %(e)s") % {'id': signature_certificate_uuid, 'e': encodeutils.exception_to_unicode(e)}) LOG.error(msg) raise exception.SignatureVerificationError( _('Unable to retrieve certificate with ID: %s') % signature_certificate_uuid ) if cert.format not in CERTIFICATE_FORMATS: raise exception.SignatureVerificationError( _('Invalid certificate format: %s') % cert.format ) if cert.format == X_509: # castellan always encodes certificates in DER format cert_data = cert.get_encoded() certificate = x509.load_der_x509_certificate(cert_data, default_backend()) else: raise exception.SignatureVerificationError( _('Certificate format not supported: %s') % cert.format ) # verify the certificate verify_certificate(certificate) return certificate def verify_certificate(certificate): """Verify that the certificate has not expired. 
:param certificate: the cryptography certificate object :raises glance.common.exception.SignatureVerificationError: if the certificate valid time range does not include now """ # Get now in UTC, since certificate returns times in UTC now = datetime.datetime.utcnow() # Confirm the certificate valid time range includes now if now < certificate.not_valid_before: raise exception.SignatureVerificationError( _('Certificate is not valid before: %s UTC') % certificate.not_valid_before ) elif now > certificate.not_valid_after: raise exception.SignatureVerificationError( _('Certificate is not valid after: %s UTC') % certificate.not_valid_after ) glance-12.0.0/glance/common/rpc.py0000664000567000056710000002213012701407051020036 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ RPC Controller """ import datetime import traceback from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import oslo_utils.importutils as imp import six from webob import exc from glance.common import client from glance.common import exception from glance.common import timeutils from glance.common import wsgi from glance.i18n import _, _LE LOG = logging.getLogger(__name__) rpc_opts = [ # NOTE(flaper87): Shamelessly copied # from oslo rpc. 
cfg.ListOpt('allowed_rpc_exception_modules', default=['glance.common.exception', 'builtins', 'exceptions', ], help='Modules of exceptions that are permitted to be recreated' ' upon receiving exception data from an rpc call.'), ] CONF = cfg.CONF CONF.register_opts(rpc_opts) class RPCJSONSerializer(wsgi.JSONResponseSerializer): @staticmethod def _to_primitive(_type, _value): return {"_type": _type, "_value": _value} def _sanitizer(self, obj): if isinstance(obj, datetime.datetime): return self._to_primitive("datetime", obj.isoformat()) return super(RPCJSONSerializer, self)._sanitizer(obj) class RPCJSONDeserializer(wsgi.JSONRequestDeserializer): @staticmethod def _to_datetime(obj): return timeutils.normalize_time(timeutils.parse_isotime(obj)) def _sanitizer(self, obj): try: _type, _value = obj["_type"], obj["_value"] return getattr(self, "_to_" + _type)(_value) except (KeyError, AttributeError): return obj class Controller(object): """ Base RPCController. This is the base controller for RPC based APIs. Commands handled by this controller respect the following form: [{ 'command': 'method_name', 'kwargs': {...} }] The controller is capable of processing more than one command per request and will always return a list of results. :params raise_exc: Boolean that specifies whether to raise exceptions instead of "serializing" them. """ def __init__(self, raise_exc=False): self._registered = {} self.raise_exc = raise_exc def register(self, resource, filtered=None, excluded=None, refiner=None): """ Exports methods through the RPC Api. :params resource: Resource's instance to register. :params filtered: List of methods that *can* be registered. Read as "Method must be in this list". :params excluded: List of methods to exclude. :params refiner: Callable to use as filter for methods. :raises TypeError: If refiner is not callable. 
""" funcs = filter(lambda x: not x.startswith("_"), dir(resource)) if filtered: funcs = [f for f in funcs if f in filtered] if excluded: funcs = [f for f in funcs if f not in excluded] if refiner: funcs = filter(refiner, funcs) for name in funcs: meth = getattr(resource, name) if not callable(meth): continue self._registered[name] = meth def __call__(self, req, body): """ Executes the command """ if not isinstance(body, list): msg = _("Request must be a list of commands") raise exc.HTTPBadRequest(explanation=msg) def validate(cmd): if not isinstance(cmd, dict): msg = _("Bad Command: %s") % str(cmd) raise exc.HTTPBadRequest(explanation=msg) command, kwargs = cmd.get("command"), cmd.get("kwargs") if (not command or not isinstance(command, six.string_types) or (kwargs and not isinstance(kwargs, dict))): msg = _("Wrong command structure: %s") % (str(cmd)) raise exc.HTTPBadRequest(explanation=msg) method = self._registered.get(command) if not method: # Just raise 404 if the user tries to # access a private method. No need for # 403 here since logically the command # is not registered to the rpc dispatcher raise exc.HTTPNotFound(explanation=_("Command not found")) return True # If more than one command were sent then they might # be intended to be executed sequentially, that for, # lets first verify they're all valid before executing # them. commands = filter(validate, body) results = [] for cmd in commands: # kwargs is not required command, kwargs = cmd["command"], cmd.get("kwargs", {}) method = self._registered[command] try: result = method(req.context, **kwargs) except Exception as e: if self.raise_exc: raise cls, val = e.__class__, encodeutils.exception_to_unicode(e) msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") % dict(val=val, tb=traceback.format_exc())) LOG.error(msg) # NOTE(flaper87): Don't propagate all exceptions # but the ones allowed by the user. 
module = cls.__module__ if module not in CONF.allowed_rpc_exception_modules: cls = exception.RPCError val = encodeutils.exception_to_unicode( exception.RPCError(cls=cls, val=val)) cls_path = "%s.%s" % (cls.__module__, cls.__name__) result = {"_error": {"cls": cls_path, "val": val}} results.append(result) return results class RPCClient(client.BaseClient): def __init__(self, *args, **kwargs): self._serializer = RPCJSONSerializer() self._deserializer = RPCJSONDeserializer() self.raise_exc = kwargs.pop("raise_exc", True) self.base_path = kwargs.pop("base_path", '/rpc') super(RPCClient, self).__init__(*args, **kwargs) @client.handle_unauthenticated def bulk_request(self, commands): """ Execute multiple commands in a single request. :params commands: List of commands to send. Commands must respect the following form: { 'command': 'method_name', 'kwargs': method_kwargs } """ body = self._serializer.to_json(commands) response = super(RPCClient, self).do_request('POST', self.base_path, body) return self._deserializer.from_json(response.read()) def do_request(self, method, **kwargs): """ Simple do_request override. This method serializes the outgoing body and builds the command that will be sent. :params method: The remote python method to call :params kwargs: Dynamic parameters that will be passed to the remote method. """ content = self.bulk_request([{'command': method, 'kwargs': kwargs}]) # NOTE(flaper87): Return the first result if # a single command was executed. content = content[0] # NOTE(flaper87): Check if content is an error # and re-raise it if raise_exc is True. Before # checking if content contains the '_error' key, # verify if it is an instance of dict - since the # RPC call may have returned something different. 
if self.raise_exc and (isinstance(content, dict) and '_error' in content): error = content['_error'] try: exc_cls = imp.import_class(error['cls']) raise exc_cls(error['val']) except ImportError: # NOTE(flaper87): The exception # class couldn't be imported, using # a generic exception. raise exception.RPCError(**error) return content def __getattr__(self, item): """ This method returns a method_proxy that will execute the rpc call in the registry service. """ if item.startswith('_'): raise AttributeError(item) def method_proxy(**kw): return self.do_request(item, **kw) return method_proxy glance-12.0.0/glance/common/location_strategy/0000775000567000056710000000000012701407204022434 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/common/location_strategy/__init__.py0000664000567000056710000001006012701407047024547 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_log import log as logging import stevedore from glance.i18n import _, _LE location_strategy_opts = [ cfg.StrOpt('location_strategy', default='location_order', choices=('location_order', 'store_type'), help=_("This value sets what strategy will be used to " "determine the image location order. 
Currently " "two strategies are packaged with Glance " "'location_order' and 'store_type'.")) ] CONF = cfg.CONF CONF.register_opts(location_strategy_opts) LOG = logging.getLogger(__name__) def _load_strategies(): """Load all strategy modules.""" modules = {} namespace = "glance.common.image_location_strategy.modules" ex = stevedore.extension.ExtensionManager(namespace) for module_name in ex.names(): try: mgr = stevedore.driver.DriverManager( namespace=namespace, name=module_name, invoke_on_load=False) # Obtain module name strategy_name = str(mgr.driver.get_strategy_name()) if strategy_name in modules: msg = (_('%(strategy)s is registered as a module twice. ' '%(module)s is not being used.') % {'strategy': strategy_name, 'module': module_name}) LOG.warn(msg) else: # Initialize strategy module mgr.driver.init() modules[strategy_name] = mgr.driver except Exception as e: LOG.error(_LE("Failed to load location strategy module " "%(module)s: %(e)s") % {'module': module_name, 'e': e}) return modules _available_strategies = _load_strategies() # TODO(kadachi): Not used but don't remove this until glance_store # development/migration stage. def verify_location_strategy(conf=None, strategies=_available_strategies): """Validate user configured 'location_strategy' option value.""" if not conf: conf = CONF.location_strategy if conf not in strategies: msg = (_('Invalid location_strategy option: %(name)s. ' 'The valid strategy option(s) is(are): %(strategies)s') % {'name': conf, 'strategies': ", ".join(strategies.keys())}) LOG.error(msg) raise RuntimeError(msg) def get_ordered_locations(locations, **kwargs): """ Order image location list by configured strategy. :param locations: The original image location list. :param kwargs: Strategy-specific arguments for under layer strategy module. :returns: The image location list with strategy-specific order. 
""" if not locations: return [] strategy_module = _available_strategies[CONF.location_strategy] return strategy_module.get_ordered_locations(copy.deepcopy(locations), **kwargs) def choose_best_location(locations, **kwargs): """ Choose best location from image location list by configured strategy. :param locations: The original image location list. :param kwargs: Strategy-specific arguments for under layer strategy module. :returns: The best location from image location list. """ locations = get_ordered_locations(locations, **kwargs) if locations: return locations[0] else: return None glance-12.0.0/glance/common/location_strategy/location_order.py0000664000567000056710000000207012701407047026015 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Image location order based location strategy module""" def get_strategy_name(): """Return strategy module name.""" return 'location_order' def init(): """Initialize strategy module.""" pass def get_ordered_locations(locations, **kwargs): """ Order image location list. :param locations: The original image location list. :returns: The image location list with original natural order. """ return locations glance-12.0.0/glance/common/location_strategy/store_type.py0000664000567000056710000001013312701407047025206 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage preference based location strategy module""" from oslo_config import cfg import six import six.moves.urllib.parse as urlparse from glance.i18n import _ store_type_opts = [ cfg.ListOpt("store_type_preference", default=[], help=_("The store names to use to get store preference order. " "The name must be registered by one of the stores " "defined by the 'stores' config option. " "This option will be applied when you using " "'store_type' option as image location strategy " "defined by the 'location_strategy' config option.")) ] CONF = cfg.CONF CONF.register_opts(store_type_opts, group='store_type_location_strategy') _STORE_TO_SCHEME_MAP = {} def get_strategy_name(): """Return strategy module name.""" return 'store_type' def init(): """Initialize strategy module.""" # NOTE(zhiyan): We have a plan to do a reusable glance client library for # all clients like Nova and Cinder in near period, it would be able to # contains common code to provide uniform image service interface for them, # just like Brick in Cinder, this code can be moved to there and shared # between Glance and client both side. So this implementation as far as # possible to prevent make relationships with Glance(server)-specific code, # for example: using functions within store module to validate # 'store_type_preference' option. 
mapping = {'filesystem': ['file', 'filesystem'], 'http': ['http', 'https'], 'rbd': ['rbd'], 's3': ['s3', 's3+http', 's3+https'], 'swift': ['swift', 'swift+https', 'swift+http'], 'sheepdog': ['sheepdog'], 'cinder': ['cinder'], 'vmware_datastore': ['vsphere']} _STORE_TO_SCHEME_MAP.clear() _STORE_TO_SCHEME_MAP.update(mapping) def get_ordered_locations(locations, uri_key='url', **kwargs): """ Order image location list. :param locations: The original image location list. :param uri_key: The key name for location URI in image location dictionary. :returns: The image location list with preferred store type order. """ def _foreach_store_type_preference(): store_types = CONF.store_type_location_strategy.store_type_preference for preferred_store in store_types: preferred_store = str(preferred_store).strip() if not preferred_store: continue yield preferred_store if not locations: return locations preferences = {} others = [] for preferred_store in _foreach_store_type_preference(): preferences[preferred_store] = [] for location in locations: uri = location.get(uri_key) if not uri: continue pieces = urlparse.urlparse(uri.strip()) store_name = None for store, schemes in six.iteritems(_STORE_TO_SCHEME_MAP): if pieces.scheme.strip() in schemes: store_name = store break if store_name in preferences: preferences[store_name].append(location) else: others.append(location) ret = [] # NOTE(zhiyan): While configuration again since py26 does not support # ordereddict container. for preferred_store in _foreach_store_type_preference(): ret.extend(preferences[preferred_store]) ret.extend(others) return ret glance-12.0.0/glance/common/client.py0000664000567000056710000005525312701407051020544 0ustar jenkinsjenkins00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # HTTPSClientAuthConnection code comes courtesy of ActiveState website: # http://code.activestate.com/recipes/ # 577548-https-httplib-client-connection-with-certificate-v/ import collections import copy import errno import functools import os import re try: from eventlet.green import socket from eventlet.green import ssl except ImportError: import socket import ssl import osprofiler.web try: import sendfile # noqa SENDFILE_SUPPORTED = True except ImportError: SENDFILE_SUPPORTED = False from oslo_log import log as logging from oslo_utils import encodeutils import six from six.moves import http_client # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import six.moves.urllib.parse as urlparse from glance.common import auth from glance.common import exception from glance.common import utils from glance.i18n import _ LOG = logging.getLogger(__name__) # common chunk size for get and put CHUNKSIZE = 65536 VERSION_REGEX = re.compile(r"/?v[0-9\.]+") def handle_unauthenticated(func): """ Wrap a function to re-authenticate and retry. """ @functools.wraps(func) def wrapped(self, *args, **kwargs): try: return func(self, *args, **kwargs) except exception.NotAuthenticated: self._authenticate(force_reauth=True) return func(self, *args, **kwargs) return wrapped def handle_redirects(func): """ Wrap the _do_request function to handle HTTP redirects. 
""" MAX_REDIRECTS = 5 @functools.wraps(func) def wrapped(self, method, url, body, headers): for i in range(MAX_REDIRECTS): try: return func(self, method, url, body, headers) except exception.RedirectException as redirect: if redirect.url is None: raise exception.InvalidRedirect() url = redirect.url raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) return wrapped class HTTPSClientAuthConnection(http_client.HTTPSConnection): """ Class to make a HTTPS connection, with support for full client-based SSL Authentication :see http://code.activestate.com/recipes/ 577548-https-httplib-client-connection-with-certificate-v/ """ def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None, insecure=False): http_client.HTTPSConnection.__init__(self, host, port, key_file=key_file, cert_file=cert_file) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.timeout = timeout self.insecure = insecure def connect(self): """ Connect to a host on a given (SSL) port. If ca_file is pointing somewhere, use it to check Server Certificate. Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to ssl.wrap_socket(), which forces SSL to check server certificate against our client certificate. 
""" sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() # Check CA file unless 'insecure' is specified if self.insecure is True: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=ssl.CERT_NONE) else: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ca_certs=self.ca_file, cert_reqs=ssl.CERT_REQUIRED) class BaseClient(object): """A base client class""" DEFAULT_PORT = 80 DEFAULT_DOC_ROOT = None # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, # Suse, FreeBSD/OpenBSD DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:' '/etc/pki/tls/certs/ca-bundle.crt:' '/etc/ssl/ca-bundle.pem:' '/etc/ssl/cert.pem') OK_RESPONSE_CODES = ( http_client.OK, http_client.CREATED, http_client.ACCEPTED, http_client.NO_CONTENT, ) REDIRECT_RESPONSE_CODES = ( http_client.MOVED_PERMANENTLY, http_client.FOUND, http_client.SEE_OTHER, http_client.USE_PROXY, http_client.TEMPORARY_REDIRECT, ) def __init__(self, host, port=None, timeout=None, use_ssl=False, auth_token=None, creds=None, doc_root=None, key_file=None, cert_file=None, ca_file=None, insecure=False, configure_via_auth=True): """ Creates a new client to some service. :param host: The host where service resides :param port: The port where service resides :param timeout: Connection timeout. :param use_ssl: Should we use HTTPS? :param auth_token: The auth token to pass to the server :param creds: The credentials to pass to the auth plugin :param doc_root: Prefix for all URLs we request from host :param key_file: Optional PEM-formatted file that contains the private key. If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_KEY_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param cert_file: Optional PEM-formatted certificate chain file. 
If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_CERT_FILE is looked for. If no such environ variable is found, ClientConnectionError will be raised. :param ca_file: Optional CA cert file to use in SSL connections If use_ssl is True, and this param is None (the default), then an environ variable GLANCE_CLIENT_CA_FILE is looked for. :param insecure: Optional. If set then the server's certificate will not be verified. :param configure_via_auth: Optional. Defaults to True. If set, the URL returned from the service catalog for the image endpoint will **override** the URL supplied to in the host parameter. """ self.host = host self.port = port or self.DEFAULT_PORT self.timeout = timeout # A value of '0' implies never timeout if timeout == 0: self.timeout = None self.use_ssl = use_ssl self.auth_token = auth_token self.creds = creds or {} self.connection = None self.configure_via_auth = configure_via_auth # doc_root can be a nullstring, which is valid, and why we # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below. 
self.doc_root = (doc_root if doc_root is not None else self.DEFAULT_DOC_ROOT) self.key_file = key_file self.cert_file = cert_file self.ca_file = ca_file self.insecure = insecure self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure) self.connect_kwargs = self.get_connect_kwargs() def get_connect_kwargs(self): # Both secure and insecure connections have a timeout option connect_kwargs = {'timeout': self.timeout} if self.use_ssl: if self.key_file is None: self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE') if self.cert_file is None: self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE') if self.ca_file is None: self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE') # Check that key_file/cert_file are either both set or both unset if self.cert_file is not None and self.key_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a cert, " "however you have failed to supply either a " "key_file parameter or set the " "GLANCE_CLIENT_KEY_FILE environ variable") raise exception.ClientConnectionError(msg) if self.key_file is not None and self.cert_file is None: msg = _("You have selected to use SSL in connecting, " "and you have supplied a key, " "however you have failed to supply either a " "cert_file parameter or set the " "GLANCE_CLIENT_CERT_FILE environ variable") raise exception.ClientConnectionError(msg) if (self.key_file is not None and not os.path.exists(self.key_file)): msg = _("The key file you specified %s does not " "exist") % self.key_file raise exception.ClientConnectionError(msg) connect_kwargs['key_file'] = self.key_file if (self.cert_file is not None and not os.path.exists(self.cert_file)): msg = _("The cert file you specified %s does not " "exist") % self.cert_file raise exception.ClientConnectionError(msg) connect_kwargs['cert_file'] = self.cert_file if (self.ca_file is not None and not os.path.exists(self.ca_file)): msg = _("The CA file you specified %s does not " "exist") % self.ca_file raise 
exception.ClientConnectionError(msg) if self.ca_file is None: for ca in self.DEFAULT_CA_FILE_PATH.split(":"): if os.path.exists(ca): self.ca_file = ca break connect_kwargs['ca_file'] = self.ca_file connect_kwargs['insecure'] = self.insecure return connect_kwargs def configure_from_url(self, url): """ Setups the connection based on the given url. The form is: ://:port/doc_root """ LOG.debug("Configuring from URL: %s", url) parsed = urlparse.urlparse(url) self.use_ssl = parsed.scheme == 'https' self.host = parsed.hostname self.port = parsed.port or 80 self.doc_root = parsed.path.rstrip('/') # We need to ensure a version identifier is appended to the doc_root if not VERSION_REGEX.match(self.doc_root): if self.DEFAULT_DOC_ROOT: doc_root = self.DEFAULT_DOC_ROOT.lstrip('/') self.doc_root += '/' + doc_root LOG.debug("Appending doc_root %(doc_root)s to URL %(url)s", {'doc_root': doc_root, 'url': url}) # ensure connection kwargs are re-evaluated after the service catalog # publicURL is parsed for potential SSL usage self.connect_kwargs = self.get_connect_kwargs() def make_auth_plugin(self, creds, insecure): """ Returns an instantiated authentication plugin. """ strategy = creds.get('strategy', 'noauth') plugin = auth.get_plugin_from_strategy(strategy, creds, insecure, self.configure_via_auth) return plugin def get_connection_type(self): """ Returns the proper connection type """ if self.use_ssl: return HTTPSClientAuthConnection else: return http_client.HTTPConnection def _authenticate(self, force_reauth=False): """ Use the authentication plugin to authenticate and set the auth token. :param force_reauth: For re-authentication to bypass cache. 
""" auth_plugin = self.auth_plugin if not auth_plugin.is_authenticated or force_reauth: auth_plugin.authenticate() self.auth_token = auth_plugin.auth_token management_url = auth_plugin.management_url if management_url and self.configure_via_auth: self.configure_from_url(management_url) @handle_unauthenticated def do_request(self, method, action, body=None, headers=None, params=None): """ Make a request, returning an HTTP response object. :param method: HTTP verb (GET, POST, PUT, etc.) :param action: Requested path to append to self.doc_root :param body: Data to send in the body of the request :param headers: Headers to send with the request :param params: Key/value pairs to use in query string :returns: HTTP response object """ if not self.auth_token: self._authenticate() url = self._construct_url(action, params) # NOTE(ameade): We need to copy these kwargs since they can be altered # in _do_request but we need the originals if handle_unauthenticated # calls this function again. return self._do_request(method=method, url=url, body=copy.deepcopy(body), headers=copy.deepcopy(headers)) def _construct_url(self, action, params=None): """ Create a URL object we can use to pass to _do_request(). """ action = urlparse.quote(action) path = '/'.join([self.doc_root or '', action.lstrip('/')]) scheme = "https" if self.use_ssl else "http" netloc = "%s:%d" % (self.host, self.port) if isinstance(params, dict): for (key, value) in list(params.items()): if value is None: del params[key] continue if not isinstance(value, six.string_types): value = str(value) params[key] = encodeutils.safe_encode(value) query = urlparse.urlencode(params) else: query = None url = urlparse.ParseResult(scheme, netloc, path, '', query, '') log_msg = _("Constructed URL: %s") LOG.debug(log_msg, url.geturl()) return url def _encode_headers(self, headers): """ Encodes headers. Note: This should be used right before sending anything out. 
:param headers: Headers to encode :returns: Dictionary with encoded headers' names and values """ if six.PY3: to_str = str else: to_str = encodeutils.safe_encode return {to_str(h): to_str(v) for h, v in six.iteritems(headers)} @handle_redirects def _do_request(self, method, url, body, headers): """ Connects to the server and issues a request. Handles converting any returned HTTP error status codes to OpenStack/Glance exceptions and closing the server connection. Returns the result data, or raises an appropriate exception. :param method: HTTP method ("GET", "POST", "PUT", etc...) :param url: urlparse.ParsedResult object with URL information :param body: data to send (as string, filelike or iterable), or None (default) :param headers: mapping of key/value pairs to add as headers :note If the body param has a read attribute, and method is either POST or PUT, this method will automatically conduct a chunked-transfer encoding and use the body as a file object or iterable, transferring chunks of data using the connection's send() method. This allows large objects to be transferred efficiently without buffering the entire body in memory. """ if url.query: path = url.path + "?" 
+ url.query else: path = url.path try: connection_type = self.get_connection_type() headers = self._encode_headers(headers or {}) headers.update(osprofiler.web.get_trace_id_headers()) if 'x-auth-token' not in headers and self.auth_token: headers['x-auth-token'] = self.auth_token c = connection_type(url.hostname, url.port, **self.connect_kwargs) def _pushing(method): return method.lower() in ('post', 'put') def _simple(body): return body is None or isinstance(body, bytes) def _filelike(body): return hasattr(body, 'read') def _sendbody(connection, iter): connection.endheaders() for sent in iter: # iterator has done the heavy lifting pass def _chunkbody(connection, iter): connection.putheader('Transfer-Encoding', 'chunked') connection.endheaders() for chunk in iter: connection.send('%x\r\n%s\r\n' % (len(chunk), chunk)) connection.send('0\r\n\r\n') # Do a simple request or a chunked request, depending # on whether the body param is file-like or iterable and # the method is PUT or POST # if not _pushing(method) or _simple(body): # Simple request... c.request(method, path, body, headers) elif _filelike(body) or self._iterable(body): c.putrequest(method, path) use_sendfile = self._sendable(body) # According to HTTP/1.1, Content-Length and Transfer-Encoding # conflict. 
for header, value in headers.items(): if use_sendfile or header.lower() != 'content-length': c.putheader(header, str(value)) iter = utils.chunkreadable(body) if use_sendfile: # send actual file without copying into userspace _sendbody(c, iter) else: # otherwise iterate and chunk _chunkbody(c, iter) else: raise TypeError('Unsupported image type: %s' % body.__class__) res = c.getresponse() def _retry(res): return res.getheader('Retry-After') def read_body(res): body = res.read() if six.PY3: body = body.decode('utf-8') return body status_code = self.get_status_code(res) if status_code in self.OK_RESPONSE_CODES: return res elif status_code in self.REDIRECT_RESPONSE_CODES: raise exception.RedirectException(res.getheader('Location')) elif status_code == http_client.UNAUTHORIZED: raise exception.NotAuthenticated(read_body(res)) elif status_code == http_client.FORBIDDEN: raise exception.Forbidden(read_body(res)) elif status_code == http_client.NOT_FOUND: raise exception.NotFound(read_body(res)) elif status_code == http_client.CONFLICT: raise exception.Duplicate(read_body(res)) elif status_code == http_client.BAD_REQUEST: raise exception.Invalid(read_body(res)) elif status_code == http_client.MULTIPLE_CHOICES: raise exception.MultipleChoices(body=read_body(res)) elif status_code == http_client.REQUEST_ENTITY_TOO_LARGE: raise exception.LimitExceeded(retry=_retry(res), body=read_body(res)) elif status_code == http_client.INTERNAL_SERVER_ERROR: raise exception.ServerError() elif status_code == http_client.SERVICE_UNAVAILABLE: raise exception.ServiceUnavailable(retry=_retry(res)) else: raise exception.UnexpectedStatus(status=status_code, body=read_body(res)) except (socket.error, IOError) as e: raise exception.ClientConnectionError(e) def _seekable(self, body): # pipes are not seekable, avoids sendfile() failure on e.g. # cat /path/to/image | glance add ... 
# or where add command is launched via popen try: os.lseek(body.fileno(), 0, os.SEEK_CUR) return True except OSError as e: return (e.errno != errno.ESPIPE) def _sendable(self, body): return (SENDFILE_SUPPORTED and hasattr(body, 'fileno') and self._seekable(body) and not self.use_ssl) def _iterable(self, body): return isinstance(body, collections.Iterable) def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int else: return response.status def _extract_params(self, actual_params, allowed_params): """ Extract a subset of keys from a dictionary. The filters key will also be extracted, and each of its values will be returned as an individual param. :param actual_params: dict of keys to filter :param allowed_params: list of keys that 'actual_params' will be reduced to :retval subset of 'params' dict """ try: # expect 'filters' param to be a dict here result = dict(actual_params.get('filters')) except TypeError: result = {} for allowed_param in allowed_params: if allowed_param in actual_params: result[allowed_param] = actual_params[allowed_param] return result glance-12.0.0/glance/common/trust_auth.py0000664000567000056710000001157412701407047021473 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystoneauth1.identity import v3 from keystoneauth1.loading import conf from keystoneauth1.loading import session from keystoneclient import exceptions as ks_exceptions from keystoneclient.v3 import client as ks_client from oslo_config import cfg from oslo_log import log as logging CONF = cfg.CONF LOG = logging.getLogger(__name__) class TokenRefresher(object): """Class that responsible for token refreshing with trusts""" def __init__(self, user_plugin, user_project, user_roles): """Prepare all parameters and clients required to refresh token""" # step 1: Prepare parameters required to connect to keystone self.auth_url = CONF.keystone_authtoken.auth_uri if not self.auth_url.endswith('/v3'): self.auth_url += '/v3' self.ssl_settings = { 'cacert': CONF.keystone_authtoken.cafile, 'insecure': CONF.keystone_authtoken.insecure, 'cert': CONF.keystone_authtoken.certfile, 'key': CONF.keystone_authtoken.keyfile, } # step 2: create trust to ensure that we can always update token # trustor = user who made the request trustor_client = self._load_client(user_plugin, self.ssl_settings) trustor_id = trustor_client.session.get_user_id() # get trustee user client that impersonates main user trustee_user_auth = conf.load_from_conf_options(CONF, 'keystone_authtoken') # save service user client because we need new service token # to refresh trust-scoped client later self.trustee_user_client = self._load_client(trustee_user_auth, self.ssl_settings) trustee_id = self.trustee_user_client.session.get_user_id() self.trust_id = trustor_client.trusts.create(trustor_user=trustor_id, trustee_user=trustee_id, impersonation=True, role_names=user_roles, project=user_project).id LOG.debug("Trust %s has been created.", self.trust_id) # step 3: postpone trust-scoped client initialization # until we need to refresh the token self.trustee_client = None def refresh_token(self): """Receive new token if user need to update old token :return: new token that can be used for authentication """ 
LOG.debug("Requesting the new token with trust %s", self.trust_id) if self.trustee_client is None: self.trustee_client = self._refresh_trustee_client() try: return self.trustee_client.session.get_token() except ks_exceptions.Unauthorized: # in case of Unauthorized exceptions try to refresh client because # service user token may expired self.trustee_client = self._refresh_trustee_client() return self.trustee_client.session.get_token() def release_resources(self): """Release keystone resources required for refreshing""" try: if self.trustee_client is None: self._refresh_trustee_client().trusts.delete(self.trust_id) else: self.trustee_client.trusts.delete(self.trust_id) except ks_exceptions.Unauthorized: # service user token may expire when we are trying to delete token # so need to update client to ensure that this is not the reason # of failure self.trustee_client = self._refresh_trustee_client() self.trustee_client.trusts.delete(self.trust_id) def _refresh_trustee_client(self): trustee_token = self.trustee_user_client.session.get_token() trustee_auth = v3.Token( trust_id=self.trust_id, token=trustee_token, auth_url=self.auth_url ) return self._load_client(trustee_auth, self.ssl_settings) @staticmethod def _load_client(plugin, ssl_settings): # load client from auth settings and user plugin sess = session.Session().load_from_options( auth=plugin, **ssl_settings) return ks_client.Client(session=sess) glance-12.0.0/glance/notifier.py0000664000567000056710000007460612701407047017625 0ustar jenkinsjenkins00000000000000# Copyright 2011, OpenStack Foundation # Copyright 2012, Red Hat, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import encodeutils from oslo_utils import excutils import six import webob from glance.common import exception from glance.common import timeutils from glance.domain import proxy as domain_proxy from glance.i18n import _, _LE notifier_opts = [ cfg.StrOpt('default_publisher_id', default="image.localhost", help='Default publisher_id for outgoing notifications.'), cfg.ListOpt('disabled_notifications', default=[], help='List of disabled notifications. A notification can be ' 'given either as a notification type to disable a single ' 'event, or as a notification group prefix to disable all ' 'events within a group. 
Example: if this config option ' 'is set to ["image.create", "metadef_namespace"], then ' '"image.create" notification will not be sent after ' 'image is created and none of the notifications for ' 'metadefinition namespaces will be sent.'), ] CONF = cfg.CONF CONF.register_opts(notifier_opts) LOG = logging.getLogger(__name__) _ALIASES = { 'glance.openstack.common.rpc.impl_kombu': 'rabbit', 'glance.openstack.common.rpc.impl_qpid': 'qpid', 'glance.openstack.common.rpc.impl_zmq': 'zmq', } def set_defaults(control_exchange='glance'): oslo_messaging.set_transport_defaults(control_exchange) def get_transport(): return oslo_messaging.get_transport(CONF, aliases=_ALIASES) class Notifier(object): """Uses a notification strategy to send out messages about events.""" def __init__(self): publisher_id = CONF.default_publisher_id self._transport = get_transport() self._notifier = oslo_messaging.Notifier(self._transport, publisher_id=publisher_id) def warn(self, event_type, payload): self._notifier.warn({}, event_type, payload) def info(self, event_type, payload): self._notifier.info({}, event_type, payload) def error(self, event_type, payload): self._notifier.error({}, event_type, payload) def _get_notification_group(notification): return notification.split('.', 1)[0] def _is_notification_enabled(notification): disabled_notifications = CONF.disabled_notifications notification_group = _get_notification_group(notification) notifications = (notification, notification_group) for disabled_notification in disabled_notifications: if disabled_notification in notifications: return False return True def _send_notification(notify, notification_type, payload): if _is_notification_enabled(notification_type): notify(notification_type, payload) def format_image_notification(image): """ Given a glance.domain.Image object, return a dictionary of relevant notification information. We purposely do not include 'location' as it may contain credentials. 
""" return { 'id': image.image_id, 'name': image.name, 'status': image.status, 'created_at': timeutils.isotime(image.created_at), 'updated_at': timeutils.isotime(image.updated_at), 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'protected': image.protected, 'checksum': image.checksum, 'owner': image.owner, 'disk_format': image.disk_format, 'container_format': image.container_format, 'size': image.size, 'virtual_size': image.virtual_size, 'is_public': image.visibility == 'public', 'properties': dict(image.extra_properties), 'tags': list(image.tags), 'deleted': False, 'deleted_at': None, } def format_image_member_notification(image_member): """Given a glance.domain.ImageMember object, return a dictionary of relevant notification information. """ return { 'image_id': image_member.image_id, 'member_id': image_member.member_id, 'status': image_member.status, 'created_at': timeutils.isotime(image_member.created_at), 'updated_at': timeutils.isotime(image_member.updated_at), 'deleted': False, 'deleted_at': None, } def format_task_notification(task): # NOTE(nikhil): input is not passed to the notifier payload as it may # contain sensitive info. 
return { 'id': task.task_id, 'type': task.type, 'status': task.status, 'result': None, 'owner': task.owner, 'message': None, 'expires_at': timeutils.isotime(task.expires_at), 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_namespace_notification(metadef_namespace): return { 'namespace': metadef_namespace.namespace, 'namespace_old': metadef_namespace.namespace, 'display_name': metadef_namespace.display_name, 'protected': metadef_namespace.protected, 'visibility': metadef_namespace.visibility, 'owner': metadef_namespace.owner, 'description': metadef_namespace.description, 'created_at': timeutils.isotime(metadef_namespace.created_at), 'updated_at': timeutils.isotime(metadef_namespace.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_object_notification(metadef_object): object_properties = metadef_object.properties or {} properties = [] for name, prop in six.iteritems(object_properties): object_property = _format_metadef_object_property(name, prop) properties.append(object_property) return { 'namespace': metadef_object.namespace, 'name': metadef_object.name, 'name_old': metadef_object.name, 'properties': properties, 'required': metadef_object.required, 'description': metadef_object.description, 'created_at': timeutils.isotime(metadef_object.created_at), 'updated_at': timeutils.isotime(metadef_object.updated_at), 'deleted': False, 'deleted_at': None, } def _format_metadef_object_property(name, metadef_property): return { 'name': name, 'type': metadef_property.type or None, 'title': metadef_property.title or None, 'description': metadef_property.description or None, 'default': metadef_property.default or None, 'minimum': metadef_property.minimum or None, 'maximum': metadef_property.maximum or None, 'enum': metadef_property.enum or None, 'pattern': metadef_property.pattern or None, 'minLength': metadef_property.minLength or None, 
'maxLength': metadef_property.maxLength or None, 'confidential': metadef_property.confidential or None, 'items': metadef_property.items or None, 'uniqueItems': metadef_property.uniqueItems or None, 'minItems': metadef_property.minItems or None, 'maxItems': metadef_property.maxItems or None, 'additionalItems': metadef_property.additionalItems or None, } def format_metadef_property_notification(metadef_property): schema = metadef_property.schema return { 'namespace': metadef_property.namespace, 'name': metadef_property.name, 'name_old': metadef_property.name, 'type': schema.get('type'), 'title': schema.get('title'), 'description': schema.get('description'), 'default': schema.get('default'), 'minimum': schema.get('minimum'), 'maximum': schema.get('maximum'), 'enum': schema.get('enum'), 'pattern': schema.get('pattern'), 'minLength': schema.get('minLength'), 'maxLength': schema.get('maxLength'), 'confidential': schema.get('confidential'), 'items': schema.get('items'), 'uniqueItems': schema.get('uniqueItems'), 'minItems': schema.get('minItems'), 'maxItems': schema.get('maxItems'), 'additionalItems': schema.get('additionalItems'), 'deleted': False, 'deleted_at': None, } def format_metadef_resource_type_notification(metadef_resource_type): return { 'namespace': metadef_resource_type.namespace, 'name': metadef_resource_type.name, 'name_old': metadef_resource_type.name, 'prefix': metadef_resource_type.prefix, 'properties_target': metadef_resource_type.properties_target, 'created_at': timeutils.isotime(metadef_resource_type.created_at), 'updated_at': timeutils.isotime(metadef_resource_type.updated_at), 'deleted': False, 'deleted_at': None, } def format_metadef_tag_notification(metadef_tag): return { 'namespace': metadef_tag.namespace, 'name': metadef_tag.name, 'name_old': metadef_tag.name, 'created_at': timeutils.isotime(metadef_tag.created_at), 'updated_at': timeutils.isotime(metadef_tag.updated_at), 'deleted': False, 'deleted_at': None, } class NotificationBase(object): def 
get_payload(self, obj): return {} def send_notification(self, notification_id, obj, extra_payload=None): payload = self.get_payload(obj) if extra_payload is not None: payload.update(extra_payload) _send_notification(self.notifier.info, notification_id, payload) @six.add_metaclass(abc.ABCMeta) class NotificationProxy(NotificationBase): def __init__(self, repo, context, notifier): self.repo = repo self.context = context self.notifier = notifier super_class = self.get_super_class() super_class.__init__(self, repo) @abc.abstractmethod def get_super_class(self): pass @six.add_metaclass(abc.ABCMeta) class NotificationRepoProxy(NotificationBase): def __init__(self, repo, context, notifier): self.repo = repo self.context = context self.notifier = notifier proxy_kwargs = {'context': self.context, 'notifier': self.notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, repo, proxy_class, proxy_kwargs) @abc.abstractmethod def get_super_class(self): pass @abc.abstractmethod def get_proxy_class(self): pass @six.add_metaclass(abc.ABCMeta) class NotificationFactoryProxy(object): def __init__(self, factory, context, notifier): kwargs = {'context': context, 'notifier': notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, factory, proxy_class, kwargs) @abc.abstractmethod def get_super_class(self): pass @abc.abstractmethod def get_proxy_class(self): pass class ImageProxy(NotificationProxy, domain_proxy.Image): def get_super_class(self): return domain_proxy.Image def get_payload(self, obj): return format_image_notification(obj) def _format_image_send(self, bytes_sent): return { 'bytes_sent': bytes_sent, 'image_id': self.repo.image_id, 'owner_id': self.repo.owner, 'receiver_tenant_id': self.context.tenant, 'receiver_user_id': self.context.user, } def _get_chunk_data_iterator(self, data, chunk_size=None): sent = 0 for chunk in data: yield chunk sent += len(chunk) if sent != 
(chunk_size or self.repo.size): notify = self.notifier.error else: notify = self.notifier.info try: _send_notification(notify, 'image.send', self._format_image_send(sent)) except Exception as err: msg = (_LE("An error occurred during image.send" " notification: %(err)s") % {'err': err}) LOG.error(msg) def get_data(self, offset=0, chunk_size=None): # Due to the need of evaluating subsequent proxies, this one # should return a generator, the call should be done before # generator creation data = self.repo.get_data(offset=offset, chunk_size=chunk_size) return self._get_chunk_data_iterator(data, chunk_size=chunk_size) def set_data(self, data, size=None): self.send_notification('image.prepare', self.repo) notify_error = self.notifier.error try: self.repo.set_data(data, size) except glance_store.StorageFull as e: msg = (_("Image storage media is full: %s") % encodeutils.exception_to_unicode(e)) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) except glance_store.StorageWriteDenied as e: msg = (_("Insufficient permissions on image storage media: %s") % encodeutils.exception_to_unicode(e)) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPServiceUnavailable(explanation=msg) except ValueError as e: msg = (_("Cannot save data for image %(image_id)s: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) except exception.Duplicate as e: msg = (_("Unable to upload duplicate image data for image" "%(image_id)s: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPConflict(explanation=msg) except exception.Forbidden as e: msg = (_("Not allowed to upload image data for image %(image_id)s:" " %(error)s") % 
{'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPForbidden(explanation=msg) except exception.NotFound as e: exc_str = encodeutils.exception_to_unicode(e) msg = (_("Image %(image_id)s could not be found after upload." " The image may have been deleted during the upload:" " %(error)s") % {'image_id': self.repo.image_id, 'error': exc_str}) _send_notification(notify_error, 'image.upload', msg) raise webob.exc.HTTPNotFound(explanation=exc_str) except webob.exc.HTTPError as e: with excutils.save_and_reraise_exception(): msg = (_("Failed to upload image data for image %(image_id)s" " due to HTTP error: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_("Failed to upload image data for image %(image_id)s " "due to internal error: %(error)s") % {'image_id': self.repo.image_id, 'error': encodeutils.exception_to_unicode(e)}) _send_notification(notify_error, 'image.upload', msg) else: self.send_notification('image.upload', self.repo) self.send_notification('image.activate', self.repo) class ImageMemberProxy(NotificationProxy, domain_proxy.ImageMember): def get_super_class(self): return domain_proxy.ImageMember class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory): def get_super_class(self): return domain_proxy.ImageFactory def get_proxy_class(self): return ImageProxy class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo): def get_super_class(self): return domain_proxy.Repo def get_proxy_class(self): return ImageProxy def get_payload(self, obj): return format_image_notification(obj) def save(self, image, from_state=None): super(ImageRepoProxy, self).save(image, from_state=from_state) self.send_notification('image.update', image) def add(self, image): super(ImageRepoProxy, 
self).add(image) self.send_notification('image.create', image) def remove(self, image): super(ImageRepoProxy, self).remove(image) self.send_notification('image.delete', image, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) class ImageMemberRepoProxy(NotificationBase, domain_proxy.MemberRepo): def __init__(self, repo, image, context, notifier): self.repo = repo self.image = image self.context = context self.notifier = notifier proxy_kwargs = {'context': self.context, 'notifier': self.notifier} proxy_class = self.get_proxy_class() super_class = self.get_super_class() super_class.__init__(self, image, repo, proxy_class, proxy_kwargs) def get_super_class(self): return domain_proxy.MemberRepo def get_proxy_class(self): return ImageMemberProxy def get_payload(self, obj): return format_image_member_notification(obj) def save(self, member, from_state=None): super(ImageMemberRepoProxy, self).save(member, from_state=from_state) self.send_notification('image.member.update', member) def add(self, member): super(ImageMemberRepoProxy, self).add(member) self.send_notification('image.member.create', member) def remove(self, member): super(ImageMemberRepoProxy, self).remove(member) self.send_notification('image.member.delete', member, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) class TaskProxy(NotificationProxy, domain_proxy.Task): def get_super_class(self): return domain_proxy.Task def get_payload(self, obj): return format_task_notification(obj) def begin_processing(self): super(TaskProxy, self).begin_processing() self.send_notification('task.processing', self.repo) def succeed(self, result): super(TaskProxy, self).succeed(result) self.send_notification('task.success', self.repo) def fail(self, message): super(TaskProxy, self).fail(message) self.send_notification('task.failure', self.repo) def run(self, executor): super(TaskProxy, self).run(executor) self.send_notification('task.run', self.repo) class 
TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory): def get_super_class(self): return domain_proxy.TaskFactory def get_proxy_class(self): return TaskProxy class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo): def get_super_class(self): return domain_proxy.TaskRepo def get_proxy_class(self): return TaskProxy def get_payload(self, obj): return format_task_notification(obj) def add(self, task): result = super(TaskRepoProxy, self).add(task) self.send_notification('task.create', task) return result def remove(self, task): result = super(TaskRepoProxy, self).remove(task) self.send_notification('task.delete', task, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime() }) return result class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub): def get_super_class(self): return domain_proxy.TaskStub class TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo): def get_super_class(self): return domain_proxy.TaskStubRepo def get_proxy_class(self): return TaskStubProxy class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace): def get_super_class(self): return domain_proxy.MetadefNamespace class MetadefNamespaceFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefNamespaceFactory): def get_super_class(self): return domain_proxy.MetadefNamespaceFactory def get_proxy_class(self): return MetadefNamespaceProxy class MetadefNamespaceRepoProxy(NotificationRepoProxy, domain_proxy.MetadefNamespaceRepo): def get_super_class(self): return domain_proxy.MetadefNamespaceRepo def get_proxy_class(self): return MetadefNamespaceProxy def get_payload(self, obj): return format_metadef_namespace_notification(obj) def save(self, metadef_namespace): name = getattr(metadef_namespace, '_old_namespace', metadef_namespace.namespace) result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace) self.send_notification( 'metadef_namespace.update', metadef_namespace, extra_payload={ 'namespace_old': name, }) 
return result def add(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace) self.send_notification('metadef_namespace.create', metadef_namespace) return result def remove(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove( metadef_namespace) self.send_notification( 'metadef_namespace.delete', metadef_namespace, extra_payload={'deleted': True, 'deleted_at': timeutils.isotime()} ) return result def remove_objects(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_objects( metadef_namespace) self.send_notification('metadef_namespace.delete_objects', metadef_namespace) return result def remove_properties(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_properties( metadef_namespace) self.send_notification('metadef_namespace.delete_properties', metadef_namespace) return result def remove_tags(self, metadef_namespace): result = super(MetadefNamespaceRepoProxy, self).remove_tags( metadef_namespace) self.send_notification('metadef_namespace.delete_tags', metadef_namespace) return result class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject): def get_super_class(self): return domain_proxy.MetadefObject class MetadefObjectFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefObjectFactory): def get_super_class(self): return domain_proxy.MetadefObjectFactory def get_proxy_class(self): return MetadefObjectProxy class MetadefObjectRepoProxy(NotificationRepoProxy, domain_proxy.MetadefObjectRepo): def get_super_class(self): return domain_proxy.MetadefObjectRepo def get_proxy_class(self): return MetadefObjectProxy def get_payload(self, obj): return format_metadef_object_notification(obj) def save(self, metadef_object): name = getattr(metadef_object, '_old_name', metadef_object.name) result = super(MetadefObjectRepoProxy, self).save(metadef_object) self.send_notification( 'metadef_object.update', metadef_object, extra_payload={ 
'namespace': metadef_object.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_object): result = super(MetadefObjectRepoProxy, self).add(metadef_object) self.send_notification('metadef_object.create', metadef_object) return result def remove(self, metadef_object): result = super(MetadefObjectRepoProxy, self).remove(metadef_object) self.send_notification( 'metadef_object.delete', metadef_object, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': metadef_object.namespace.namespace } ) return result class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty): def get_super_class(self): return domain_proxy.MetadefProperty class MetadefPropertyFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefPropertyFactory): def get_super_class(self): return domain_proxy.MetadefPropertyFactory def get_proxy_class(self): return MetadefPropertyProxy class MetadefPropertyRepoProxy(NotificationRepoProxy, domain_proxy.MetadefPropertyRepo): def get_super_class(self): return domain_proxy.MetadefPropertyRepo def get_proxy_class(self): return MetadefPropertyProxy def get_payload(self, obj): return format_metadef_property_notification(obj) def save(self, metadef_property): name = getattr(metadef_property, '_old_name', metadef_property.name) result = super(MetadefPropertyRepoProxy, self).save(metadef_property) self.send_notification( 'metadef_property.update', metadef_property, extra_payload={ 'namespace': metadef_property.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_property): result = super(MetadefPropertyRepoProxy, self).add(metadef_property) self.send_notification('metadef_property.create', metadef_property) return result def remove(self, metadef_property): result = super(MetadefPropertyRepoProxy, self).remove(metadef_property) self.send_notification( 'metadef_property.delete', metadef_property, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': 
metadef_property.namespace.namespace } ) return result class MetadefResourceTypeProxy(NotificationProxy, domain_proxy.MetadefResourceType): def get_super_class(self): return domain_proxy.MetadefResourceType class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefResourceTypeFactory): def get_super_class(self): return domain_proxy.MetadefResourceTypeFactory def get_proxy_class(self): return MetadefResourceTypeProxy class MetadefResourceTypeRepoProxy(NotificationRepoProxy, domain_proxy.MetadefResourceTypeRepo): def get_super_class(self): return domain_proxy.MetadefResourceTypeRepo def get_proxy_class(self): return MetadefResourceTypeProxy def get_payload(self, obj): return format_metadef_resource_type_notification(obj) def add(self, md_resource_type): result = super(MetadefResourceTypeRepoProxy, self).add( md_resource_type) self.send_notification('metadef_resource_type.create', md_resource_type) return result def remove(self, md_resource_type): result = super(MetadefResourceTypeRepoProxy, self).remove( md_resource_type) self.send_notification( 'metadef_resource_type.delete', md_resource_type, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': md_resource_type.namespace.namespace } ) return result class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag): def get_super_class(self): return domain_proxy.MetadefTag class MetadefTagFactoryProxy(NotificationFactoryProxy, domain_proxy.MetadefTagFactory): def get_super_class(self): return domain_proxy.MetadefTagFactory def get_proxy_class(self): return MetadefTagProxy class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo): def get_super_class(self): return domain_proxy.MetadefTagRepo def get_proxy_class(self): return MetadefTagProxy def get_payload(self, obj): return format_metadef_tag_notification(obj) def save(self, metadef_tag): name = getattr(metadef_tag, '_old_name', metadef_tag.name) result = super(MetadefTagRepoProxy, 
self).save(metadef_tag) self.send_notification( 'metadef_tag.update', metadef_tag, extra_payload={ 'namespace': metadef_tag.namespace.namespace, 'name_old': name, }) return result def add(self, metadef_tag): result = super(MetadefTagRepoProxy, self).add(metadef_tag) self.send_notification('metadef_tag.create', metadef_tag) return result def add_tags(self, metadef_tags): result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags) for metadef_tag in metadef_tags: self.send_notification('metadef_tag.create', metadef_tag) return result def remove(self, metadef_tag): result = super(MetadefTagRepoProxy, self).remove(metadef_tag) self.send_notification( 'metadef_tag.delete', metadef_tag, extra_payload={ 'deleted': True, 'deleted_at': timeutils.isotime(), 'namespace': metadef_tag.namespace.namespace } ) return result glance-12.0.0/glance/version.py0000664000567000056710000000125612701407047017462 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('glance') glance-12.0.0/glance/db/0000775000567000056710000000000012701407204015777 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/__init__.py0000664000567000056710000011302612701407047020120 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
# Copyright 2010-2012 OpenStack Foundation # Copyright 2013 IBM Corp. # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import importutils from wsme.rest import json from glance.api.v2.model.metadef_property_type import PropertyType from glance.common import crypt from glance.common import exception from glance.common.glare import serialization from glance.common import location_strategy import glance.domain import glance.domain.proxy from glance import glare as ga from glance.i18n import _ CONF = cfg.CONF CONF.import_opt('image_size_cap', 'glance.common.config') CONF.import_opt('metadata_encryption_key', 'glance.common.config') def get_api(): api = importutils.import_module(CONF.data_api) if hasattr(api, 'configure'): api.configure() return api def unwrap(db_api): return db_api # attributes common to all models BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at', 'deleted']) IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size', 'disk_format', 'container_format', 'min_disk', 'min_ram', 'is_public', 'locations', 'checksum', 'owner', 'protected']) class ArtifactRepo(object): fields = ['id', 'name', 'version', 'type_name', 'type_version', 'visibility', 'state', 'owner', 'scope', 'created_at', 'updated_at', 'tags', 'dependencies', 'blobs', 'properties'] def __init__(self, context, db_api, plugins): self.context = context self.db_api = db_api 
self.plugins = plugins def get(self, artifact_id, type_name=None, type_version=None, show_level=None, include_deleted=False): if show_level is None: show_level = ga.Showlevel.BASIC try: db_api_artifact = self.db_api.artifact_get(self.context, artifact_id, type_name, type_version, show_level) if db_api_artifact["state"] == 'deleted' and not include_deleted: raise exception.ArtifactNotFound(artifact_id) except (exception.ArtifactNotFound, exception.ArtifactForbidden): msg = _("No artifact found with ID %s") % artifact_id raise exception.ArtifactNotFound(msg) return serialization.deserialize_from_db(db_api_artifact, self.plugins) def list(self, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, show_level=None): sort_keys = ['created_at'] if sort_keys is None else sort_keys sort_dirs = ['desc'] if sort_dirs is None else sort_dirs if show_level is None: show_level = ga.Showlevel.NONE db_api_artifacts = self.db_api.artifact_get_all( self.context, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, show_level=show_level) artifacts = [] for db_api_artifact in db_api_artifacts: artifact = serialization.deserialize_from_db(db_api_artifact, self.plugins) artifacts.append(artifact) return artifacts def _format_artifact_from_db(self, db_artifact): kwargs = {k: db_artifact.get(k, None) for k in self.fields} return glance.domain.Artifact(**kwargs) def add(self, artifact): artifact_values = serialization.serialize_for_db(artifact) artifact_values['updated_at'] = artifact.updated_at self.db_api.artifact_create(self.context, artifact_values, artifact.type_name, artifact.type_version) def save(self, artifact): artifact_values = serialization.serialize_for_db(artifact) try: db_api_artifact = self.db_api.artifact_update( self.context, artifact_values, artifact.id, artifact.type_name, artifact.type_version) except (exception.ArtifactNotFound, exception.ArtifactForbidden): msg = _("No artifact found with ID %s") % artifact.id raise 
exception.ArtifactNotFound(msg) return serialization.deserialize_from_db(db_api_artifact, self.plugins) def remove(self, artifact): try: self.db_api.artifact_delete(self.context, artifact.id, artifact.type_name, artifact.type_version) except (exception.NotFound, exception.Forbidden): msg = _("No artifact found with ID %s") % artifact.id raise exception.ArtifactNotFound(msg) def publish(self, artifact): try: artifact_changed = ( self.db_api.artifact_publish( self.context, artifact.id, artifact.type_name, artifact.type_version)) return serialization.deserialize_from_db(artifact_changed, self.plugins) except (exception.NotFound, exception.Forbidden): msg = _("No artifact found with ID %s") % artifact.id raise exception.ArtifactNotFound(msg) class ImageRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def get(self, image_id): try: db_api_image = dict(self.db_api.image_get(self.context, image_id)) if db_api_image['deleted']: raise exception.ImageNotFound() except (exception.ImageNotFound, exception.Forbidden): msg = _("No image found with ID %s") % image_id raise exception.ImageNotFound(msg) tags = self.db_api.image_tag_get_all(self.context, image_id) image = self._format_image_from_db(db_api_image, tags) return ImageProxy(image, self.context, self.db_api) def list(self, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, member_status='accepted'): sort_key = ['created_at'] if not sort_key else sort_key sort_dir = ['desc'] if not sort_dir else sort_dir db_api_images = self.db_api.image_get_all( self.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, member_status=member_status, return_tag=True) images = [] for db_api_image in db_api_images: db_image = dict(db_api_image) image = self._format_image_from_db(db_image, db_image['tags']) images.append(image) return images def _format_image_from_db(self, db_image, db_tags): visibility = 'public' if db_image['is_public'] else 
'private' properties = {} for prop in db_image.pop('properties'): # NOTE(markwash) db api requires us to filter deleted if not prop['deleted']: properties[prop['name']] = prop['value'] locations = [loc for loc in db_image['locations'] if loc['status'] == 'active'] if CONF.metadata_encryption_key: key = CONF.metadata_encryption_key for l in locations: l['url'] = crypt.urlsafe_decrypt(key, l['url']) return glance.domain.Image( image_id=db_image['id'], name=db_image['name'], status=db_image['status'], created_at=db_image['created_at'], updated_at=db_image['updated_at'], visibility=visibility, min_disk=db_image['min_disk'], min_ram=db_image['min_ram'], protected=db_image['protected'], locations=location_strategy.get_ordered_locations(locations), checksum=db_image['checksum'], owner=db_image['owner'], disk_format=db_image['disk_format'], container_format=db_image['container_format'], size=db_image['size'], virtual_size=db_image['virtual_size'], extra_properties=properties, tags=db_tags ) def _format_image_to_db(self, image): locations = image.locations if CONF.metadata_encryption_key: key = CONF.metadata_encryption_key ld = [] for loc in locations: url = crypt.urlsafe_encrypt(key, loc['url']) ld.append({'url': url, 'metadata': loc['metadata'], 'status': loc['status'], # NOTE(zhiyan): New location has no ID field. 
'id': loc.get('id')}) locations = ld return { 'id': image.image_id, 'name': image.name, 'status': image.status, 'created_at': image.created_at, 'min_disk': image.min_disk, 'min_ram': image.min_ram, 'protected': image.protected, 'locations': locations, 'checksum': image.checksum, 'owner': image.owner, 'disk_format': image.disk_format, 'container_format': image.container_format, 'size': image.size, 'virtual_size': image.virtual_size, 'is_public': image.visibility == 'public', 'properties': dict(image.extra_properties), } def add(self, image): image_values = self._format_image_to_db(image) if (image_values['size'] is not None and image_values['size'] > CONF.image_size_cap): raise exception.ImageSizeLimitExceeded # the updated_at value is not set in the _format_image_to_db # function since it is specific to image create image_values['updated_at'] = image.updated_at new_values = self.db_api.image_create(self.context, image_values) self.db_api.image_tag_set_all(self.context, image.image_id, image.tags) image.created_at = new_values['created_at'] image.updated_at = new_values['updated_at'] def save(self, image, from_state=None): image_values = self._format_image_to_db(image) if (image_values['size'] is not None and image_values['size'] > CONF.image_size_cap): raise exception.ImageSizeLimitExceeded try: new_values = self.db_api.image_update(self.context, image.image_id, image_values, purge_props=True, from_state=from_state) except (exception.ImageNotFound, exception.Forbidden): msg = _("No image found with ID %s") % image.image_id raise exception.ImageNotFound(msg) self.db_api.image_tag_set_all(self.context, image.image_id, image.tags) image.updated_at = new_values['updated_at'] def remove(self, image): try: self.db_api.image_update(self.context, image.image_id, {'status': image.status}, purge_props=True) except (exception.ImageNotFound, exception.Forbidden): msg = _("No image found with ID %s") % image.image_id raise exception.ImageNotFound(msg) # NOTE(markwash): don't 
update tags? new_values = self.db_api.image_destroy(self.context, image.image_id) image.updated_at = new_values['updated_at'] class ImageProxy(glance.domain.proxy.Image): def __init__(self, image, context, db_api): self.context = context self.db_api = db_api self.image = image super(ImageProxy, self).__init__(image) class ImageMemberRepo(object): def __init__(self, context, db_api, image): self.context = context self.db_api = db_api self.image = image def _format_image_member_from_db(self, db_image_member): return glance.domain.ImageMembership( id=db_image_member['id'], image_id=db_image_member['image_id'], member_id=db_image_member['member'], status=db_image_member['status'], created_at=db_image_member['created_at'], updated_at=db_image_member['updated_at'] ) def _format_image_member_to_db(self, image_member): image_member = {'image_id': self.image.image_id, 'member': image_member.member_id, 'status': image_member.status, 'created_at': image_member.created_at} return image_member def list(self): db_members = self.db_api.image_member_find( self.context, image_id=self.image.image_id) image_members = [] for db_member in db_members: image_members.append(self._format_image_member_from_db(db_member)) return image_members def add(self, image_member): try: self.get(image_member.member_id) except exception.NotFound: pass else: msg = _('The target member %(member_id)s is already ' 'associated with image %(image_id)s.') % { 'member_id': image_member.member_id, 'image_id': self.image.image_id} raise exception.Duplicate(msg) image_member_values = self._format_image_member_to_db(image_member) # Note(shalq): find the image member including the member marked with # deleted. We will use only one record to represent membership between # the same image and member. The record of the deleted image member # will be reused, if it exists, update its properties instead of # creating a new one. 
members = self.db_api.image_member_find(self.context, image_id=self.image.image_id, member=image_member.member_id, include_deleted=True) if members: new_values = self.db_api.image_member_update(self.context, members[0]['id'], image_member_values) else: new_values = self.db_api.image_member_create(self.context, image_member_values) image_member.created_at = new_values['created_at'] image_member.updated_at = new_values['updated_at'] image_member.id = new_values['id'] def remove(self, image_member): try: self.db_api.image_member_delete(self.context, image_member.id) except (exception.NotFound, exception.Forbidden): msg = _("The specified member %s could not be found") raise exception.NotFound(msg % image_member.id) def save(self, image_member, from_state=None): image_member_values = self._format_image_member_to_db(image_member) try: new_values = self.db_api.image_member_update(self.context, image_member.id, image_member_values) except (exception.NotFound, exception.Forbidden): raise exception.NotFound() image_member.updated_at = new_values['updated_at'] def get(self, member_id): try: db_api_image_member = self.db_api.image_member_find( self.context, self.image.image_id, member_id) if not db_api_image_member: raise exception.NotFound() except (exception.NotFound, exception.Forbidden): raise exception.NotFound() image_member = self._format_image_member_from_db( db_api_image_member[0]) return image_member class TaskRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def _format_task_from_db(self, db_task): return glance.domain.Task( task_id=db_task['id'], task_type=db_task['type'], status=db_task['status'], owner=db_task['owner'], expires_at=db_task['expires_at'], created_at=db_task['created_at'], updated_at=db_task['updated_at'], task_input=db_task['input'], result=db_task['result'], message=db_task['message'], ) def _format_task_stub_from_db(self, db_task): return glance.domain.TaskStub( task_id=db_task['id'], 
task_type=db_task['type'], status=db_task['status'], owner=db_task['owner'], expires_at=db_task['expires_at'], created_at=db_task['created_at'], updated_at=db_task['updated_at'], ) def _format_task_to_db(self, task): task = {'id': task.task_id, 'type': task.type, 'status': task.status, 'input': task.task_input, 'result': task.result, 'owner': task.owner, 'message': task.message, 'expires_at': task.expires_at, 'created_at': task.created_at, 'updated_at': task.updated_at, } return task def get(self, task_id): try: db_api_task = self.db_api.task_get(self.context, task_id) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task_id raise exception.NotFound(msg) return self._format_task_from_db(db_api_task) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): db_api_tasks = self.db_api.task_get_all(self.context, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir) return [self._format_task_stub_from_db(task) for task in db_api_tasks] def save(self, task): task_values = self._format_task_to_db(task) try: updated_values = self.db_api.task_update(self.context, task.task_id, task_values) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task.task_id raise exception.NotFound(msg) task.updated_at = updated_values['updated_at'] def add(self, task): task_values = self._format_task_to_db(task) updated_values = self.db_api.task_create(self.context, task_values) task.created_at = updated_values['created_at'] task.updated_at = updated_values['updated_at'] def remove(self, task): task_values = self._format_task_to_db(task) try: self.db_api.task_update(self.context, task.task_id, task_values) updated_values = self.db_api.task_delete(self.context, task.task_id) except (exception.NotFound, exception.Forbidden): msg = _('Could not find task %s') % task.task_id raise exception.NotFound(msg) task.updated_at = updated_values['updated_at'] 
task.deleted_at = updated_values['deleted_at'] class MetadefNamespaceRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api def _format_namespace_from_db(self, namespace_obj): return glance.domain.MetadefNamespace( namespace_id=namespace_obj['id'], namespace=namespace_obj['namespace'], display_name=namespace_obj['display_name'], description=namespace_obj['description'], owner=namespace_obj['owner'], visibility=namespace_obj['visibility'], protected=namespace_obj['protected'], created_at=namespace_obj['created_at'], updated_at=namespace_obj['updated_at'] ) def _format_namespace_to_db(self, namespace_obj): namespace = { 'namespace': namespace_obj.namespace, 'display_name': namespace_obj.display_name, 'description': namespace_obj.description, 'visibility': namespace_obj.visibility, 'protected': namespace_obj.protected, 'owner': namespace_obj.owner } return namespace def add(self, namespace): self.db_api.metadef_namespace_create( self.context, self._format_namespace_to_db(namespace) ) def get(self, namespace): try: db_api_namespace = self.db_api.metadef_namespace_get( self.context, namespace) except (exception.NotFound, exception.Forbidden): msg = _('Could not find namespace %s') % namespace raise exception.NotFound(msg) return self._format_namespace_from_db(db_api_namespace) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): db_namespaces = self.db_api.metadef_namespace_get_all( self.context, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters ) return [self._format_namespace_from_db(namespace_obj) for namespace_obj in db_namespaces] def remove(self, namespace): try: self.db_api.metadef_namespace_delete(self.context, namespace.namespace) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_objects(self, namespace): try: 
self.db_api.metadef_object_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_properties(self, namespace): try: self.db_api.metadef_property_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def remove_tags(self, namespace): try: self.db_api.metadef_tag_delete_namespace_content( self.context, namespace.namespace ) except (exception.NotFound, exception.Forbidden): msg = _("The specified namespace %s could not be found") raise exception.NotFound(msg % namespace.namespace) def object_count(self, namespace_name): return self.db_api.metadef_object_count( self.context, namespace_name ) def property_count(self, namespace_name): return self.db_api.metadef_property_count( self.context, namespace_name ) def save(self, namespace): try: self.db_api.metadef_namespace_update( self.context, namespace.namespace_id, self._format_namespace_to_db(namespace) ) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return namespace class MetadefObjectRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_object_from_db(self, metadata_object, namespace_entity): required_str = metadata_object['required'] required_list = required_str.split(",") if required_str else [] # Convert the persisted json schema to a dict of PropertyTypes property_types = {} json_props = metadata_object['json_schema'] for id in json_props: property_types[id] = json.fromjson(PropertyType, json_props[id]) return glance.domain.MetadefObject( namespace=namespace_entity, object_id=metadata_object['id'], 
name=metadata_object['name'], required=required_list, description=metadata_object['description'], properties=property_types, created_at=metadata_object['created_at'], updated_at=metadata_object['updated_at'] ) def _format_metadef_object_to_db(self, metadata_object): required_str = (",".join(metadata_object.required) if metadata_object.required else None) # Convert the model PropertyTypes dict to a JSON string properties = metadata_object.properties db_schema = {} if properties: for k, v in properties.items(): json_data = json.tojson(PropertyType, v) db_schema[k] = json_data db_metadata_object = { 'name': metadata_object.name, 'required': required_str, 'description': metadata_object.description, 'json_schema': db_schema } return db_metadata_object def add(self, metadata_object): self.db_api.metadef_object_create( self.context, metadata_object.namespace, self._format_metadef_object_to_db(metadata_object) ) def get(self, namespace, object_name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_object = self.db_api.metadef_object_get( self.context, namespace, object_name) except (exception.NotFound, exception.Forbidden): msg = _('Could not find metadata object %s') % object_name raise exception.NotFound(msg) return self._format_metadef_object_from_db(db_metadata_object, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_objects = self.db_api.metadef_object_get_all( self.context, namespace) return [self._format_metadef_object_from_db(metadata_object, namespace_entity) for metadata_object in db_metadata_objects] def remove(self, metadata_object): try: self.db_api.metadef_object_delete( self.context, metadata_object.namespace.namespace, metadata_object.name ) except (exception.NotFound, exception.Forbidden): msg = _("The specified metadata object %s could not be found") raise 
exception.NotFound(msg % metadata_object.name) def save(self, metadata_object): try: self.db_api.metadef_object_update( self.context, metadata_object.namespace.namespace, metadata_object.object_id, self._format_metadef_object_to_db(metadata_object)) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return metadata_object class MetadefResourceTypeRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_resource_type_from_db(self, resource_type, namespace): return glance.domain.MetadefResourceType( namespace=namespace, name=resource_type['name'], prefix=resource_type['prefix'], properties_target=resource_type['properties_target'], created_at=resource_type['created_at'], updated_at=resource_type['updated_at'] ) def _format_resource_type_to_db(self, resource_type): db_resource_type = { 'name': resource_type.name, 'prefix': resource_type.prefix, 'properties_target': resource_type.properties_target } return db_resource_type def add(self, resource_type): self.db_api.metadef_resource_type_association_create( self.context, resource_type.namespace, self._format_resource_type_to_db(resource_type) ) def get(self, resource_type, namespace): namespace_entity = self.meta_namespace_repo.get(namespace) db_resource_type = ( self.db_api. metadef_resource_type_association_get( self.context, namespace, resource_type ) ) return self._format_resource_type_from_db(db_resource_type, namespace_entity) def list(self, filters=None): namespace = filters['namespace'] if namespace: namespace_entity = self.meta_namespace_repo.get(namespace) db_resource_types = ( self.db_api. metadef_resource_type_association_get_all_by_namespace( self.context, namespace ) ) return [self._format_resource_type_from_db(resource_type, namespace_entity) for resource_type in db_resource_types] else: db_resource_types = ( self.db_api. 
metadef_resource_type_get_all(self.context) ) return [glance.domain.MetadefResourceType( namespace=None, name=resource_type['name'], prefix=None, properties_target=None, created_at=resource_type['created_at'], updated_at=resource_type['updated_at'] ) for resource_type in db_resource_types] def remove(self, resource_type): try: self.db_api.metadef_resource_type_association_delete( self.context, resource_type.namespace.namespace, resource_type.name) except (exception.NotFound, exception.Forbidden): msg = _("The specified resource type %s could not be found ") raise exception.NotFound(msg % resource_type.name) class MetadefPropertyRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_property_from_db( self, property, namespace_entity): return glance.domain.MetadefProperty( namespace=namespace_entity, property_id=property['id'], name=property['name'], schema=property['json_schema'] ) def _format_metadef_property_to_db(self, property): db_metadata_object = { 'name': property.name, 'json_schema': property.schema } return db_metadata_object def add(self, property): self.db_api.metadef_property_create( self.context, property.namespace, self._format_metadef_property_to_db(property) ) def get(self, namespace, property_name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_property_type = self.db_api.metadef_property_get( self.context, namespace, property_name ) except (exception.NotFound, exception.Forbidden): msg = _('Could not find property %s') % property_name raise exception.NotFound(msg) return self._format_metadef_property_from_db( db_property_type, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_properties = self.db_api.metadef_property_get_all( self.context, 
namespace) return ( [self._format_metadef_property_from_db( property, namespace_entity) for property in db_properties] ) def remove(self, property): try: self.db_api.metadef_property_delete( self.context, property.namespace.namespace, property.name) except (exception.NotFound, exception.Forbidden): msg = _("The specified property %s could not be found") raise exception.NotFound(msg % property.name) def save(self, property): try: self.db_api.metadef_property_update( self.context, property.namespace.namespace, property.property_id, self._format_metadef_property_to_db(property) ) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return property class MetadefTagRepo(object): def __init__(self, context, db_api): self.context = context self.db_api = db_api self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) def _format_metadef_tag_from_db(self, metadata_tag, namespace_entity): return glance.domain.MetadefTag( namespace=namespace_entity, tag_id=metadata_tag['id'], name=metadata_tag['name'], created_at=metadata_tag['created_at'], updated_at=metadata_tag['updated_at'] ) def _format_metadef_tag_to_db(self, metadata_tag): db_metadata_tag = { 'name': metadata_tag.name } return db_metadata_tag def add(self, metadata_tag): self.db_api.metadef_tag_create( self.context, metadata_tag.namespace, self._format_metadef_tag_to_db(metadata_tag) ) def add_tags(self, metadata_tags): tag_list = [] namespace = None for metadata_tag in metadata_tags: tag_list.append(self._format_metadef_tag_to_db(metadata_tag)) if namespace is None: namespace = metadata_tag.namespace self.db_api.metadef_tag_create_tags( self.context, namespace, tag_list) def get(self, namespace, name): try: namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_tag = self.db_api.metadef_tag_get( self.context, namespace, name) except (exception.NotFound, exception.Forbidden): msg = _('Could not find metadata tag %s') % name raise exception.NotFound(msg) return 
self._format_metadef_tag_from_db(db_metadata_tag, namespace_entity) def list(self, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): namespace = filters['namespace'] namespace_entity = self.meta_namespace_repo.get(namespace) db_metadata_tag = self.db_api.metadef_tag_get_all( self.context, namespace, filters, marker, limit, sort_key, sort_dir) return [self._format_metadef_tag_from_db(metadata_tag, namespace_entity) for metadata_tag in db_metadata_tag] def remove(self, metadata_tag): try: self.db_api.metadef_tag_delete( self.context, metadata_tag.namespace.namespace, metadata_tag.name ) except (exception.NotFound, exception.Forbidden): msg = _("The specified metadata tag %s could not be found") raise exception.NotFound(msg % metadata_tag.name) def save(self, metadata_tag): try: self.db_api.metadef_tag_update( self.context, metadata_tag.namespace.namespace, metadata_tag.tag_id, self._format_metadef_tag_to_db(metadata_tag)) except exception.NotFound as e: raise exception.NotFound(explanation=e.msg) return metadata_tag glance-12.0.0/glance/db/registry/0000775000567000056710000000000012701407204017647 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/registry/__init__.py0000664000567000056710000000000012701407047021753 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/registry/api.py0000664000567000056710000004633412701407047021011 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ This is the Registry's Driver API. This API relies on the registry RPC client (version >= 2). The functions bellow work as a proxy for the database back-end configured in the registry service, which means that everything returned by that back-end will be also returned by this API. This API exists for supporting deployments not willing to put database credentials in glance-api. Those deployments can rely on this registry driver that will talk to a remote registry service, which will then access the database back-end. """ import functools from glance import glare from glance.registry.client.v2 import api def configure(): api.configure_registry_client() def _get_client(func): """Injects a client instance to the each function This decorator creates an instance of the Registry client and passes it as an argument to each function in this API. """ @functools.wraps(func) def wrapper(context, *args, **kwargs): client = api.get_registry_client(context) return func(client, *args, **kwargs) return wrapper @_get_client def image_create(client, values): """Create an image from the values dictionary.""" return client.image_create(values=values) @_get_client def image_update(client, image_id, values, purge_props=False, from_state=None): """ Set the given properties on an image and update it. :raises: ImageNotFound if image does not exist. 
""" return client.image_update(values=values, image_id=image_id, purge_props=purge_props, from_state=from_state) @_get_client def image_destroy(client, image_id): """Destroy the image or raise if it does not exist.""" return client.image_destroy(image_id=image_id) @_get_client def image_get(client, image_id, force_show_deleted=False): return client.image_get(image_id=image_id, force_show_deleted=force_show_deleted) def is_image_visible(context, image, status=None): """Return True if the image is visible in this context.""" # Is admin == image visible if context.is_admin: return True # No owner == image visible if image['owner'] is None: return True # Image is_public == image visible if image['is_public']: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == image['owner']: return True # Figure out if this image is shared with that tenant members = image_member_find(context, image_id=image['id'], member=context.owner, status=status) if members: return True # Private image return False @_get_client def image_get_all(client, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False): """ Get all images that match zero or more filters. :param filters: dict of filter keys and values. If a 'properties' key is present, it is treated as a dict of key/value filters on the image properties attribute :param marker: image id after which to start page :param limit: maximum number of images to return :param sort_key: image attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param member_status: only return shared images that have this membership status :param is_public: If true, return only public images. If false, return only private and shared images. :param admin_as_user: For backwards compatibility. 
If true, then return to an admin the equivalent set of images which it would see if it were a regular user :param return_tag: To indicates whether image entry in result includes it relevant tag entries. This could improve upper-layer query performance, to prevent using separated calls """ sort_key = ['created_at'] if not sort_key else sort_key sort_dir = ['desc'] if not sort_dir else sort_dir return client.image_get_all(filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, member_status=member_status, is_public=is_public, admin_as_user=admin_as_user, return_tag=return_tag) @_get_client def image_property_create(client, values, session=None): """Create an ImageProperty object""" return client.image_property_create(values=values) @_get_client def image_property_delete(client, prop_ref, image_ref, session=None): """ Used internally by _image_property_create and image_property_update """ return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref) @_get_client def image_member_create(client, values, session=None): """Create an ImageMember object""" return client.image_member_create(values=values) @_get_client def image_member_update(client, memb_id, values): """Update an ImageMember object""" return client.image_member_update(memb_id=memb_id, values=values) @_get_client def image_member_delete(client, memb_id, session=None): """Delete an ImageMember object""" client.image_member_delete(memb_id=memb_id) @_get_client def image_member_find(client, image_id=None, member=None, status=None, include_deleted=False): """Find all members that meet the given criteria. Note, currently include_deleted should be true only when create a new image membership, as there may be a deleted image membership between the same image and tenant, the membership will be reused in this case. It should be false in other cases. 
:param image_id: identifier of image entity :param member: tenant to which membership has been granted :include_deleted: A boolean indicating whether the result should include the deleted record of image member """ return client.image_member_find(image_id=image_id, member=member, status=status, include_deleted=include_deleted) @_get_client def image_member_count(client, image_id): """Return the number of image members for this image :param image_id: identifier of image entity """ return client.image_member_count(image_id=image_id) @_get_client def image_tag_set_all(client, image_id, tags): client.image_tag_set_all(image_id=image_id, tags=tags) @_get_client def image_tag_create(client, image_id, value, session=None): """Create an image tag.""" return client.image_tag_create(image_id=image_id, value=value) @_get_client def image_tag_delete(client, image_id, value, session=None): """Delete an image tag.""" client.image_tag_delete(image_id=image_id, value=value) @_get_client def image_tag_get_all(client, image_id, session=None): """Get a list of tags for a specific image.""" return client.image_tag_get_all(image_id=image_id) @_get_client def image_location_delete(client, image_id, location_id, status, session=None): """Delete an image location.""" client.image_location_delete(image_id=image_id, location_id=location_id, status=status) @_get_client def image_location_update(client, image_id, location, session=None): """Update image location.""" client.image_location_update(image_id=image_id, location=location) @_get_client def user_get_storage_usage(client, owner_id, image_id=None, session=None): return client.user_get_storage_usage(owner_id=owner_id, image_id=image_id) @_get_client def task_get(client, task_id, session=None, force_show_deleted=False): """Get a single task object :returns: task dictionary """ return client.task_get(task_id=task_id, session=session, force_show_deleted=force_show_deleted) @_get_client def task_get_all(client, filters=None, marker=None, 
limit=None, sort_key='created_at', sort_dir='desc', admin_as_user=False): """Get all tasks that match zero or more filters. :param filters: dict of filter keys and values. :param marker: task id after which to start page :param limit: maximum number of tasks to return :param sort_key: task attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param admin_as_user: For backwards compatibility. If true, then return to an admin the equivalent set of tasks which it would see if it were a regular user :returns: tasks set """ return client.task_get_all(filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, admin_as_user=admin_as_user) @_get_client def task_create(client, values, session=None): """Create a task object""" return client.task_create(values=values, session=session) @_get_client def task_delete(client, task_id, session=None): """Delete a task object""" return client.task_delete(task_id=task_id, session=session) @_get_client def task_update(client, task_id, values, session=None): return client.task_update(task_id=task_id, values=values, session=session) # Metadef @_get_client def metadef_namespace_get_all( client, marker=None, limit=None, sort_key='created_at', sort_dir=None, filters=None, session=None): return client.metadef_namespace_get_all( marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters) @_get_client def metadef_namespace_get(client, namespace_name, session=None): return client.metadef_namespace_get(namespace_name=namespace_name) @_get_client def metadef_namespace_create(client, values, session=None): return client.metadef_namespace_create(values=values) @_get_client def metadef_namespace_update( client, namespace_id, namespace_dict, session=None): return client.metadef_namespace_update( namespace_id=namespace_id, namespace_dict=namespace_dict) @_get_client def metadef_namespace_delete(client, namespace_name, session=None): return 
client.metadef_namespace_delete( namespace_name=namespace_name) @_get_client def metadef_object_get_all(client, namespace_name, session=None): return client.metadef_object_get_all( namespace_name=namespace_name) @_get_client def metadef_object_get( client, namespace_name, object_name, session=None): return client.metadef_object_get( namespace_name=namespace_name, object_name=object_name) @_get_client def metadef_object_create( client, namespace_name, object_dict, session=None): return client.metadef_object_create( namespace_name=namespace_name, object_dict=object_dict) @_get_client def metadef_object_update( client, namespace_name, object_id, object_dict, session=None): return client.metadef_object_update( namespace_name=namespace_name, object_id=object_id, object_dict=object_dict) @_get_client def metadef_object_delete( client, namespace_name, object_name, session=None): return client.metadef_object_delete( namespace_name=namespace_name, object_name=object_name) @_get_client def metadef_object_delete_namespace_content( client, namespace_name, session=None): return client.metadef_object_delete_namespace_content( namespace_name=namespace_name) @_get_client def metadef_object_count( client, namespace_name, session=None): return client.metadef_object_count( namespace_name=namespace_name) @_get_client def metadef_property_get_all( client, namespace_name, session=None): return client.metadef_property_get_all( namespace_name=namespace_name) @_get_client def metadef_property_get( client, namespace_name, property_name, session=None): return client.metadef_property_get( namespace_name=namespace_name, property_name=property_name) @_get_client def metadef_property_create( client, namespace_name, property_dict, session=None): return client.metadef_property_create( namespace_name=namespace_name, property_dict=property_dict) @_get_client def metadef_property_update( client, namespace_name, property_id, property_dict, session=None): return client.metadef_property_update( 
namespace_name=namespace_name, property_id=property_id, property_dict=property_dict) @_get_client def metadef_property_delete( client, namespace_name, property_name, session=None): return client.metadef_property_delete( namespace_name=namespace_name, property_name=property_name) @_get_client def metadef_property_delete_namespace_content( client, namespace_name, session=None): return client.metadef_property_delete_namespace_content( namespace_name=namespace_name) @_get_client def metadef_property_count( client, namespace_name, session=None): return client.metadef_property_count( namespace_name=namespace_name) @_get_client def metadef_resource_type_create(client, values, session=None): return client.metadef_resource_type_create(values=values) @_get_client def metadef_resource_type_get( client, resource_type_name, session=None): return client.metadef_resource_type_get( resource_type_name=resource_type_name) @_get_client def metadef_resource_type_get_all(client, session=None): return client.metadef_resource_type_get_all() @_get_client def metadef_resource_type_delete( client, resource_type_name, session=None): return client.metadef_resource_type_delete( resource_type_name=resource_type_name) @_get_client def metadef_resource_type_association_get( client, namespace_name, resource_type_name, session=None): return client.metadef_resource_type_association_get( namespace_name=namespace_name, resource_type_name=resource_type_name) @_get_client def metadef_resource_type_association_create( client, namespace_name, values, session=None): return client.metadef_resource_type_association_create( namespace_name=namespace_name, values=values) @_get_client def metadef_resource_type_association_delete( client, namespace_name, resource_type_name, session=None): return client.metadef_resource_type_association_delete( namespace_name=namespace_name, resource_type_name=resource_type_name) @_get_client def metadef_resource_type_association_get_all_by_namespace( client, namespace_name, 
session=None): return client.metadef_resource_type_association_get_all_by_namespace( namespace_name=namespace_name) @_get_client def metadef_tag_get_all(client, namespace_name, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir=None, session=None): return client.metadef_tag_get_all( namespace_name=namespace_name, filters=filters, marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, session=session) @_get_client def metadef_tag_get(client, namespace_name, name, session=None): return client.metadef_tag_get( namespace_name=namespace_name, name=name) @_get_client def metadef_tag_create( client, namespace_name, tag_dict, session=None): return client.metadef_tag_create( namespace_name=namespace_name, tag_dict=tag_dict) @_get_client def metadef_tag_create_tags( client, namespace_name, tag_list, session=None): return client.metadef_tag_create_tags( namespace_name=namespace_name, tag_list=tag_list) @_get_client def metadef_tag_update( client, namespace_name, id, tag_dict, session=None): return client.metadef_tag_update( namespace_name=namespace_name, id=id, tag_dict=tag_dict) @_get_client def metadef_tag_delete( client, namespace_name, name, session=None): return client.metadef_tag_delete( namespace_name=namespace_name, name=name) @_get_client def metadef_tag_delete_namespace_content( client, namespace_name, session=None): return client.metadef_tag_delete_namespace_content( namespace_name=namespace_name) @_get_client def metadef_tag_count(client, namespace_name, session=None): return client.metadef_tag_count(namespace_name=namespace_name) @_get_client def artifact_create(client, values, type_name, type_version=None, session=None): return client.artifact_create(values=values, type_name=type_name, type_version=type_version) @_get_client def artifact_update(client, values, artifact_id, type_name, type_version=None, session=None): return client.artifact_update(values=values, artifact_id=artifact_id, type_name=type_name, 
type_version=type_version) @_get_client def artifact_delete(client, artifact_id, type_name, type_version=None, session=None): return client.artifact_delete(artifact_id=artifact_id, type_name=type_name, type_version=type_version) @_get_client def artifact_get(client, artifact_id, type_name, type_version=None, session=None): return client.artifact_get(artifact_id=artifact_id, type_name=type_name, type_version=type_version) @_get_client def artifact_get_all(client, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, show_level=glare.Showlevel.NONE, session=None): if filters is None: filters = {} return client.artifact_create(marker, limit, sort_key, sort_dir, filters, show_level) @_get_client def artifact_publish(client, artifact_id, type_name, type_version=None, session=None): return client.artifact_publish(artifact_id=artifact_id, type_name=type_name, type_version=type_version) glance-12.0.0/glance/db/simple/0000775000567000056710000000000012701407204017270 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/simple/__init__.py0000664000567000056710000000000012701407047021374 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/simple/api.py0000664000567000056710000020713012701407051020416 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack, Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import functools import uuid from oslo_log import log as logging import six from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.i18n import _, _LI, _LW LOG = logging.getLogger(__name__) DATA = { 'images': {}, 'members': {}, 'metadef_namespace_resource_types': [], 'metadef_namespaces': [], 'metadef_objects': [], 'metadef_properties': [], 'metadef_resource_types': [], 'metadef_tags': [], 'tags': {}, 'locations': [], 'tasks': {}, 'task_info': {}, 'artifacts': {}, 'artifact_properties': {}, 'artifact_tags': {}, 'artifact_dependencies': {}, 'artifact_blobs': {}, 'artifact_blob_locations': {} } INDEX = 0 def log_call(func): @functools.wraps(func) def wrapped(*args, **kwargs): LOG.info(_LI('Calling %(funcname)s: args=%(args)s, ' 'kwargs=%(kwargs)s'), {"funcname": func.__name__, "args": args, "kwargs": kwargs}) output = func(*args, **kwargs) LOG.info(_LI('Returning %(funcname)s: %(output)s'), {"funcname": func.__name__, "output": output}) return output return wrapped def reset(): global DATA DATA = { 'images': {}, 'members': [], 'metadef_namespace_resource_types': [], 'metadef_namespaces': [], 'metadef_objects': [], 'metadef_properties': [], 'metadef_resource_types': [], 'metadef_tags': [], 'tags': {}, 'locations': [], 'tasks': {}, 'task_info': {}, 'artifacts': {} } def clear_db_env(*args, **kwargs): """ Setup global environment configuration variables. We have no connection-oriented environment variables, so this is a NOOP. 
""" pass def _get_session(): return DATA @utils.no_4byte_params def _image_location_format(image_id, value, meta_data, status, deleted=False): dt = timeutils.utcnow() return { 'id': str(uuid.uuid4()), 'image_id': image_id, 'created_at': dt, 'updated_at': dt, 'deleted_at': dt if deleted else None, 'deleted': deleted, 'url': value, 'metadata': meta_data, 'status': status, } def _image_property_format(image_id, name, value): return { 'image_id': image_id, 'name': name, 'value': value, 'deleted': False, 'deleted_at': None, } def _image_member_format(image_id, tenant_id, can_share, status='pending', deleted=False): dt = timeutils.utcnow() return { 'id': str(uuid.uuid4()), 'image_id': image_id, 'member': tenant_id, 'can_share': can_share, 'status': status, 'created_at': dt, 'updated_at': dt, 'deleted': deleted, } def _pop_task_info_values(values): task_info_values = {} for k, v in list(values.items()): if k in ['input', 'result', 'message']: values.pop(k) task_info_values[k] = v return task_info_values def _format_task_from_db(task_ref, task_info_ref): task = copy.deepcopy(task_ref) if task_info_ref: task_info = copy.deepcopy(task_info_ref) task_info_values = _pop_task_info_values(task_info) task.update(task_info_values) return task def _task_format(task_id, **values): dt = timeutils.utcnow() task = { 'id': task_id, 'type': 'import', 'status': 'pending', 'owner': None, 'expires_at': None, 'created_at': dt, 'updated_at': dt, 'deleted_at': None, 'deleted': False, } task.update(values) return task def _task_info_format(task_id, **values): task_info = { 'task_id': task_id, 'input': None, 'result': None, 'message': None, } task_info.update(values) return task_info @utils.no_4byte_params def _image_update(image, values, properties): # NOTE(bcwaldon): store properties as a list to match sqlalchemy driver properties = [{'name': k, 'value': v, 'image_id': image['id'], 'deleted': False} for k, v in properties.items()] if 'properties' not in image.keys(): image['properties'] = [] 
image['properties'].extend(properties) image.update(values) return image def _image_format(image_id, **values): dt = timeutils.utcnow() image = { 'id': image_id, 'name': None, 'owner': None, 'locations': [], 'status': 'queued', 'protected': False, 'is_public': False, 'container_format': None, 'disk_format': None, 'min_ram': 0, 'min_disk': 0, 'size': None, 'virtual_size': None, 'checksum': None, 'tags': [], 'created_at': dt, 'updated_at': dt, 'deleted_at': None, 'deleted': False, } locations = values.pop('locations', None) if locations is not None: image['locations'] = [] for location in locations: location_ref = _image_location_format(image_id, location['url'], location['metadata'], location['status']) image['locations'].append(location_ref) DATA['locations'].append(location_ref) return _image_update(image, values, values.pop('properties', {})) def _filter_images(images, filters, context, status='accepted', is_public=None, admin_as_user=False): filtered_images = [] if 'properties' in filters: prop_filter = filters.pop('properties') filters.update(prop_filter) if status == 'all': status = None visibility = filters.pop('visibility', None) for image in images: member = image_member_find(context, image_id=image['id'], member=context.owner, status=status) is_member = len(member) > 0 has_ownership = context.owner and image['owner'] == context.owner can_see = (image['is_public'] or has_ownership or is_member or (context.is_admin and not admin_as_user)) if not can_see: continue if visibility: if visibility == 'public': if not image['is_public']: continue elif visibility == 'private': if image['is_public']: continue if not (has_ownership or (context.is_admin and not admin_as_user)): continue elif visibility == 'shared': if not is_member: continue if is_public is not None: if not image['is_public'] == is_public: continue to_add = True for k, value in six.iteritems(filters): key = k if k.endswith('_min') or k.endswith('_max'): key = key[0:-4] try: value = int(value) except 
ValueError: msg = _("Unable to filter on a range " "with a non-numeric value.") raise exception.InvalidFilterRangeValue(msg) if k.endswith('_min'): to_add = image.get(key) >= value elif k.endswith('_max'): to_add = image.get(key) <= value elif k in ['created_at', 'updated_at']: attr_value = image.get(key) operator, isotime = utils.split_filter_op(value) parsed_time = timeutils.parse_isotime(isotime) threshold = timeutils.normalize_time(parsed_time) to_add = utils.evaluate_filter_op(attr_value, operator, threshold) elif k in ['name', 'id', 'status', 'container_format', 'disk_format']: attr_value = image.get(key) operator, list_value = utils.split_filter_op(value) if operator == 'in': threshold = utils.split_filter_value_for_quotes(list_value) to_add = attr_value in threshold elif operator == 'eq': to_add = (attr_value == list_value) else: msg = (_("Unable to filter by unknown operator '%s'.") % operator) raise exception.InvalidFilterOperatorValue(msg) elif k != 'is_public' and image.get(k) is not None: to_add = image.get(key) == value elif k == 'tags': filter_tags = value image_tags = image_tag_get_all(context, image['id']) for tag in filter_tags: if tag not in image_tags: to_add = False break else: to_add = False for p in image['properties']: properties = {p['name']: p['value'], 'deleted': p['deleted']} to_add |= (properties.get(key) == value and properties.get('deleted') is False) if not to_add: break if to_add: filtered_images.append(image) return filtered_images def _do_pagination(context, images, marker, limit, show_deleted, status='accepted'): start = 0 end = -1 if marker is None: start = 0 else: # Check that the image is accessible _image_get(context, marker, force_show_deleted=show_deleted, status=status) for i, image in enumerate(images): if image['id'] == marker: start = i + 1 break else: raise exception.ImageNotFound() end = start + limit if limit is not None else None return images[start:end] def _sort_images(images, sort_key, sort_dir): sort_key = 
['created_at'] if not sort_key else sort_key default_sort_dir = 'desc' if not sort_dir: sort_dir = [default_sort_dir] * len(sort_key) elif len(sort_dir) == 1: default_sort_dir = sort_dir[0] sort_dir *= len(sort_key) for key in ['created_at', 'id']: if key not in sort_key: sort_key.append(key) sort_dir.append(default_sort_dir) for key in sort_key: if images and not (key in images[0]): raise exception.InvalidSortKey() if any(dir for dir in sort_dir if dir not in ['asc', 'desc']): raise exception.InvalidSortDir() if len(sort_key) != len(sort_dir): raise exception.Invalid(message='Number of sort dirs does not match ' 'the number of sort keys') for key, dir in reversed(list(zip(sort_key, sort_dir))): reverse = dir == 'desc' images.sort(key=lambda x: x[key] or '', reverse=reverse) return images def _image_get(context, image_id, force_show_deleted=False, status=None): try: image = DATA['images'][image_id] except KeyError: LOG.warn(_LW('Could not find image %s') % image_id) raise exception.ImageNotFound() if image['deleted'] and not (force_show_deleted or context.can_see_deleted): LOG.warn(_LW('Unable to get deleted image')) raise exception.ImageNotFound() if not is_image_visible(context, image): LOG.warn(_LW('Unable to get unowned image')) raise exception.Forbidden("Image not visible to you") return image @log_call def image_get(context, image_id, session=None, force_show_deleted=False): image = _image_get(context, image_id, force_show_deleted) return _normalize_locations(context, copy.deepcopy(image), force_show_deleted=force_show_deleted) @log_call def image_get_all(context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False): filters = filters or {} images = DATA['images'].values() images = _filter_images(images, filters, context, member_status, is_public, admin_as_user) images = _sort_images(images, sort_key, sort_dir) images = _do_pagination(context, images, marker, 
limit, filters.get('deleted')) force_show_deleted = True if filters.get('deleted') else False res = [] for image in images: img = _normalize_locations(context, copy.deepcopy(image), force_show_deleted=force_show_deleted) if return_tag: img['tags'] = image_tag_get_all(context, img['id']) res.append(img) return res @log_call def image_property_create(context, values): image = _image_get(context, values['image_id']) prop = _image_property_format(values['image_id'], values['name'], values['value']) image['properties'].append(prop) return prop @log_call def image_property_delete(context, prop_ref, image_ref): prop = None for p in DATA['images'][image_ref]['properties']: if p['name'] == prop_ref: prop = p if not prop: raise exception.NotFound() prop['deleted_at'] = timeutils.utcnow() prop['deleted'] = True return prop @log_call def image_member_find(context, image_id=None, member=None, status=None, include_deleted=False): filters = [] images = DATA['images'] members = DATA['members'] def is_visible(member): return (member['member'] == context.owner or images[member['image_id']]['owner'] == context.owner) if not context.is_admin: filters.append(is_visible) if image_id is not None: filters.append(lambda m: m['image_id'] == image_id) if member is not None: filters.append(lambda m: m['member'] == member) if status is not None: filters.append(lambda m: m['status'] == status) for f in filters: members = filter(f, members) return [copy.deepcopy(m) for m in members] @log_call def image_member_count(context, image_id): """Return the number of image members for this image :param image_id: identifier of image entity """ if not image_id: msg = _("Image id is required.") raise exception.Invalid(msg) members = DATA['members'] return len([x for x in members if x['image_id'] == image_id]) @log_call def image_member_create(context, values): member = _image_member_format(values['image_id'], values['member'], values.get('can_share', False), values.get('status', 'pending'), 
values.get('deleted', False)) global DATA DATA['members'].append(member) return copy.deepcopy(member) @log_call def image_member_update(context, member_id, values): global DATA for member in DATA['members']: if member['id'] == member_id: member.update(values) member['updated_at'] = timeutils.utcnow() return copy.deepcopy(member) else: raise exception.NotFound() @log_call def image_member_delete(context, member_id): global DATA for i, member in enumerate(DATA['members']): if member['id'] == member_id: del DATA['members'][i] break else: raise exception.NotFound() @log_call @utils.no_4byte_params def image_location_add(context, image_id, location): deleted = location['status'] in ('deleted', 'pending_delete') location_ref = _image_location_format(image_id, value=location['url'], meta_data=location['metadata'], status=location['status'], deleted=deleted) DATA['locations'].append(location_ref) image = DATA['images'][image_id] image.setdefault('locations', []).append(location_ref) @log_call @utils.no_4byte_params def image_location_update(context, image_id, location): loc_id = location.get('id') if loc_id is None: msg = _("The location data has an invalid ID: %d") % loc_id raise exception.Invalid(msg) deleted = location['status'] in ('deleted', 'pending_delete') updated_time = timeutils.utcnow() delete_time = updated_time if deleted else None updated = False for loc in DATA['locations']: if loc['id'] == loc_id and loc['image_id'] == image_id: loc.update({"value": location['url'], "meta_data": location['metadata'], "status": location['status'], "deleted": deleted, "updated_at": updated_time, "deleted_at": delete_time}) updated = True break if not updated: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=loc_id, img=image_id)) LOG.warn(msg) raise exception.NotFound(msg) @log_call def image_location_delete(context, image_id, location_id, status, delete_time=None): if status not in ('deleted', 'pending_delete'): msg = _("The status of deleted image 
location can only be set to " "'pending_delete' or 'deleted'.") raise exception.Invalid(msg) deleted = False for loc in DATA['locations']: if loc['id'] == location_id and loc['image_id'] == image_id: deleted = True delete_time = delete_time or timeutils.utcnow() loc.update({"deleted": deleted, "status": status, "updated_at": delete_time, "deleted_at": delete_time}) break if not deleted: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=location_id, img=image_id)) LOG.warn(msg) raise exception.NotFound(msg) def _image_locations_set(context, image_id, locations): # NOTE(zhiyan): 1. Remove records from DB for deleted locations used_loc_ids = [loc['id'] for loc in locations if loc.get('id')] image = DATA['images'][image_id] for loc in image['locations']: if loc['id'] not in used_loc_ids and not loc['deleted']: image_location_delete(context, image_id, loc['id'], 'deleted') for i, loc in enumerate(DATA['locations']): if (loc['image_id'] == image_id and loc['id'] not in used_loc_ids and not loc['deleted']): del DATA['locations'][i] # NOTE(zhiyan): 2. Adding or update locations for loc in locations: if loc.get('id') is None: image_location_add(context, image_id, loc) else: image_location_update(context, image_id, loc) def _image_locations_delete_all(context, image_id, delete_time=None): image = DATA['images'][image_id] for loc in image['locations']: if not loc['deleted']: image_location_delete(context, image_id, loc['id'], 'deleted', delete_time=delete_time) for i, loc in enumerate(DATA['locations']): if image_id == loc['image_id'] and loc['deleted'] == False: del DATA['locations'][i] def _normalize_locations(context, image, force_show_deleted=False): """ Generate suitable dictionary list for locations field of image. We don't need to set other data fields of location record which return from image query. 
""" if image['status'] == 'deactivated' and not context.is_admin: # Locations are not returned for a deactivated image for non-admin user image['locations'] = [] return image if force_show_deleted: locations = image['locations'] else: locations = filter(lambda x: not x['deleted'], image['locations']) image['locations'] = [{'id': loc['id'], 'url': loc['url'], 'metadata': loc['metadata'], 'status': loc['status']} for loc in locations] return image @log_call def image_create(context, image_values): global DATA image_id = image_values.get('id', str(uuid.uuid4())) if image_id in DATA['images']: raise exception.Duplicate() if 'status' not in image_values: raise exception.Invalid('status is a required attribute') allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size', 'virtual_size', 'checksum', 'locations', 'owner', 'protected', 'is_public', 'container_format', 'disk_format', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'properties', 'tags']) incorrect_keys = set(image_values.keys()) - allowed_keys if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) image = _image_format(image_id, **image_values) DATA['images'][image_id] = image DATA['tags'][image_id] = image.pop('tags', []) return _normalize_locations(context, copy.deepcopy(image)) @log_call def image_update(context, image_id, image_values, purge_props=False, from_state=None): global DATA try: image = DATA['images'][image_id] except KeyError: raise exception.ImageNotFound() location_data = image_values.pop('locations', None) if location_data is not None: _image_locations_set(context, image_id, location_data) # replace values for properties that already exist new_properties = image_values.pop('properties', {}) for prop in image['properties']: if prop['name'] in new_properties: prop['value'] = new_properties.pop(prop['name']) elif purge_props: # this matches weirdness in the sqlalchemy api prop['deleted'] = True image['updated_at'] = timeutils.utcnow() 
_image_update(image, image_values, new_properties) DATA['images'][image_id] = image return _normalize_locations(context, copy.deepcopy(image)) @log_call def image_destroy(context, image_id): global DATA try: delete_time = timeutils.utcnow() DATA['images'][image_id]['deleted'] = True DATA['images'][image_id]['deleted_at'] = delete_time # NOTE(flaper87): Move the image to one of the deleted statuses # if it hasn't been done yet. if (DATA['images'][image_id]['status'] not in ['deleted', 'pending_delete']): DATA['images'][image_id]['status'] = 'deleted' _image_locations_delete_all(context, image_id, delete_time=delete_time) for prop in DATA['images'][image_id]['properties']: image_property_delete(context, prop['name'], image_id) members = image_member_find(context, image_id=image_id) for member in members: image_member_delete(context, member['id']) tags = image_tag_get_all(context, image_id) for tag in tags: image_tag_delete(context, image_id, tag) return _normalize_locations(context, copy.deepcopy(DATA['images'][image_id])) except KeyError: raise exception.ImageNotFound() @log_call def image_tag_get_all(context, image_id): return DATA['tags'].get(image_id, []) @log_call def image_tag_get(context, image_id, value): tags = image_tag_get_all(context, image_id) if value in tags: return value else: raise exception.NotFound() @log_call def image_tag_set_all(context, image_id, values): global DATA DATA['tags'][image_id] = values @log_call @utils.no_4byte_params def image_tag_create(context, image_id, value): global DATA DATA['tags'][image_id].append(value) return value @log_call def image_tag_delete(context, image_id, value): global DATA try: DATA['tags'][image_id].remove(value) except ValueError: raise exception.NotFound() def is_image_mutable(context, image): """Return True if the image is mutable in this context.""" # Is admin == image mutable if context.is_admin: return True # No owner == image not mutable if image['owner'] is None or context.owner is None: return False 
# Image only mutable by its owner return image['owner'] == context.owner def is_image_visible(context, image, status=None): """Return True if the image is visible in this context.""" # Is admin == image visible if context.is_admin: return True # No owner == image visible if image['owner'] is None: return True # Image is_public == image visible if image['is_public']: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == image['owner']: return True # Figure out if this image is shared with that tenant if status == 'all': status = None members = image_member_find(context, image_id=image['id'], member=context.owner, status=status) if members: return True # Private image return False def user_get_storage_usage(context, owner_id, image_id=None, session=None): images = image_get_all(context, filters={'owner': owner_id}) total = 0 for image in images: if image['status'] in ['killed', 'deleted']: continue if image['id'] != image_id: locations = [loc for loc in image['locations'] if loc.get('status') != 'deleted'] total += (image['size'] * len(locations)) return total @log_call def task_create(context, values): """Create a task object""" global DATA task_values = copy.deepcopy(values) task_id = task_values.get('id', str(uuid.uuid4())) required_attributes = ['type', 'status', 'input'] allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner', 'message', 'expires_at', 'created_at', 'updated_at', 'deleted_at', 'deleted'] if task_id in DATA['tasks']: raise exception.Duplicate() for key in required_attributes: if key not in task_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(task_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) task_info_values = _pop_task_info_values(task_values) task = _task_format(task_id, **task_values) DATA['tasks'][task_id] = task task_info = 
_task_info_create(task['id'], task_info_values) return _format_task_from_db(task, task_info) @log_call def task_update(context, task_id, values): """Update a task object""" global DATA task_values = copy.deepcopy(values) task_info_values = _pop_task_info_values(task_values) try: task = DATA['tasks'][task_id] except KeyError: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) task.update(task_values) task['updated_at'] = timeutils.utcnow() DATA['tasks'][task_id] = task task_info = _task_info_update(task['id'], task_info_values) return _format_task_from_db(task, task_info) @log_call def task_get(context, task_id, force_show_deleted=False): task, task_info = _task_get(context, task_id, force_show_deleted) return _format_task_from_db(task, task_info) def _task_get(context, task_id, force_show_deleted=False): try: task = DATA['tasks'][task_id] except KeyError: msg = _LW('Could not find task %s') % task_id LOG.warn(msg) raise exception.TaskNotFound(task_id=task_id) if task['deleted'] and not (force_show_deleted or context.can_see_deleted): msg = _LW('Unable to get deleted task %s') % task_id LOG.warn(msg) raise exception.TaskNotFound(task_id=task_id) if not _is_task_visible(context, task): LOG.debug("Forbidding request, task %s is not visible", task_id) msg = _("Forbidding request, task %s is not visible") % task_id raise exception.Forbidden(msg) task_info = _task_info_get(task_id) return task, task_info @log_call def task_delete(context, task_id): global DATA try: DATA['tasks'][task_id]['deleted'] = True DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow() DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow() return copy.deepcopy(DATA['tasks'][task_id]) except KeyError: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) @log_call def task_get_all(context, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """ Get all tasks that match zero or more 
filters. :param filters: dict of filter keys and values. :param marker: task id after which to start page :param limit: maximum number of tasks to return :param sort_key: task attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :returns: tasks set """ filters = filters or {} tasks = DATA['tasks'].values() tasks = _filter_tasks(tasks, filters, context) tasks = _sort_tasks(tasks, sort_key, sort_dir) tasks = _paginate_tasks(context, tasks, marker, limit, filters.get('deleted')) filtered_tasks = [] for task in tasks: filtered_tasks.append(_format_task_from_db(task, task_info_ref=None)) return filtered_tasks def _is_task_visible(context, task): """Return True if the task is visible in this context.""" # Is admin == task visible if context.is_admin: return True # No owner == task visible if task['owner'] is None: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == task['owner']: return True return False def _filter_tasks(tasks, filters, context, admin_as_user=False): filtered_tasks = [] for task in tasks: has_ownership = context.owner and task['owner'] == context.owner can_see = (has_ownership or (context.is_admin and not admin_as_user)) if not can_see: continue add = True for k, value in six.iteritems(filters): add = task[k] == value and task['deleted'] is False if not add: break if add: filtered_tasks.append(task) return filtered_tasks def _sort_tasks(tasks, sort_key, sort_dir): reverse = False if tasks and not (sort_key in tasks[0]): raise exception.InvalidSortKey() keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '', x['created_at'], x['id']) reverse = sort_dir == 'desc' tasks.sort(key=keyfn, reverse=reverse) return tasks def _paginate_tasks(context, tasks, marker, limit, show_deleted): start = 0 end = -1 if marker is None: start = 0 else: # Check that the task is accessible _task_get(context, marker, 
force_show_deleted=show_deleted) for i, task in enumerate(tasks): if task['id'] == marker: start = i + 1 break else: if task: raise exception.TaskNotFound(task_id=task['id']) else: msg = _("Task does not exist") raise exception.NotFound(message=msg) end = start + limit if limit is not None else None return tasks[start:end] def _task_info_create(task_id, values): """Create a Task Info for Task with given task ID""" global DATA task_info = _task_info_format(task_id, **values) DATA['task_info'][task_id] = task_info return task_info def _task_info_update(task_id, values): """Update Task Info for Task with given task ID and updated values""" global DATA try: task_info = DATA['task_info'][task_id] except KeyError: LOG.debug("No task info found with task id %s", task_id) raise exception.TaskNotFound(task_id=task_id) task_info.update(values) DATA['task_info'][task_id] = task_info return task_info def _task_info_get(task_id): """Get Task Info for Task with given task ID""" global DATA try: task_info = DATA['task_info'][task_id] except KeyError: msg = _LW('Could not find task info %s') % task_id LOG.warn(msg) raise exception.TaskNotFound(task_id=task_id) return task_info def _metadef_delete_namespace_content(get_func, key, context, namespace_name): global DATA metadefs = get_func(context, namespace_name) data = DATA[key] for metadef in metadefs: data.remove(metadef) return metadefs @log_call def metadef_namespace_create(context, values): """Create a namespace object""" global DATA namespace_values = copy.deepcopy(values) namespace_name = namespace_values.get('namespace') required_attributes = ['namespace', 'owner'] allowed_attributes = ['namespace', 'owner', 'display_name', 'description', 'visibility', 'protected'] for namespace in DATA['metadef_namespaces']: if namespace['namespace'] == namespace_name: LOG.debug("Can not create the metadata definition namespace. 
" "Namespace=%s already exists.", namespace_name) raise exception.MetadefDuplicateNamespace( namespace_name=namespace_name) for key in required_attributes: if key not in namespace_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(namespace_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) namespace = _format_namespace(namespace_values) DATA['metadef_namespaces'].append(namespace) return namespace @log_call def metadef_namespace_update(context, namespace_id, values): """Update a namespace object""" global DATA namespace_values = copy.deepcopy(values) namespace = metadef_namespace_get_by_id(context, namespace_id) if namespace['namespace'] != values['namespace']: for db_namespace in DATA['metadef_namespaces']: if db_namespace['namespace'] == values['namespace']: LOG.debug("Invalid update. It would result in a duplicate " "metadata definition namespace with the same " "name of %s", values['namespace']) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition namespace with the same" " name of %s") % values['namespace']) raise exception.MetadefDuplicateNamespace(emsg) DATA['metadef_namespaces'].remove(namespace) namespace.update(namespace_values) namespace['updated_at'] = timeutils.utcnow() DATA['metadef_namespaces'].append(namespace) return namespace @log_call def metadef_namespace_get_by_id(context, namespace_id): """Get a namespace object""" try: namespace = next(namespace for namespace in DATA['metadef_namespaces'] if namespace['id'] == namespace_id) except StopIteration: msg = (_("Metadata definition namespace not found for id=%s") % namespace_id) LOG.warn(msg) raise exception.MetadefNamespaceNotFound(msg) if not _is_namespace_visible(context, namespace): LOG.debug("Forbidding request, metadata definition namespace=%s " "is not visible.", namespace.namespace) emsg = _("Forbidding request, metadata definition namespace=%s " "is not visible.") % namespace.namespace raise exception.MetadefForbidden(emsg) return namespace @log_call def metadef_namespace_get(context, namespace_name): """Get a namespace object""" try: namespace = next(namespace for namespace in DATA['metadef_namespaces'] if namespace['namespace'] == namespace_name) except StopIteration: LOG.debug("No namespace found with name %s", namespace_name) raise exception.MetadefNamespaceNotFound( namespace_name=namespace_name) _check_namespace_visibility(context, namespace, namespace_name) return namespace @log_call def metadef_namespace_get_all(context, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): """Get a namespaces list""" resource_types = filters.get('resource_types', []) if filters else [] visibility = filters.get('visibility', None) if filters else None namespaces = [] for namespace in DATA['metadef_namespaces']: if not _is_namespace_visible(context, namespace): continue if visibility and namespace['visibility'] != visibility: continue if resource_types: for association 
in DATA['metadef_namespace_resource_types']: if association['namespace_id'] == namespace['id']: if association['name'] in resource_types: break else: continue namespaces.append(namespace) return namespaces @log_call def metadef_namespace_delete(context, namespace_name): """Delete a namespace object""" global DATA namespace = metadef_namespace_get(context, namespace_name) DATA['metadef_namespaces'].remove(namespace) return namespace @log_call def metadef_namespace_delete_content(context, namespace_name): """Delete a namespace content""" global DATA namespace = metadef_namespace_get(context, namespace_name) namespace_id = namespace['id'] objects = [] for object in DATA['metadef_objects']: if object['namespace_id'] != namespace_id: objects.append(object) DATA['metadef_objects'] = objects properties = [] for property in DATA['metadef_objects']: if property['namespace_id'] != namespace_id: properties.append(object) DATA['metadef_objects'] = properties return namespace @log_call def metadef_object_get(context, namespace_name, object_name): """Get a metadef object""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if (object['namespace_id'] == namespace['id'] and object['name'] == object_name): return object else: LOG.debug("The metadata definition object with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': object_name, 'namespace_name': namespace_name}) raise exception.MetadefObjectNotFound(namespace_name=namespace_name, object_name=object_name) @log_call def metadef_object_get_by_id(context, namespace_name, object_id): """Get a metadef object""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if (object['namespace_id'] == namespace['id'] and object['id'] == object_id): return object else: msg = (_("Metadata definition 
object not found for id=%s") % object_id) LOG.warn(msg) raise exception.MetadefObjectNotFound(msg) @log_call def metadef_object_get_all(context, namespace_name): """Get a metadef objects list""" namespace = metadef_namespace_get(context, namespace_name) objects = [] _check_namespace_visibility(context, namespace, namespace_name) for object in DATA['metadef_objects']: if object['namespace_id'] == namespace['id']: objects.append(object) return objects @log_call def metadef_object_create(context, namespace_name, values): """Create a metadef object""" global DATA object_values = copy.deepcopy(values) object_name = object_values['name'] required_attributes = ['name'] allowed_attributes = ['name', 'description', 'json_schema', 'required'] namespace = metadef_namespace_get(context, namespace_name) for object in DATA['metadef_objects']: if (object['name'] == object_name and object['namespace_id'] == namespace['id']): LOG.debug("A metadata definition object with name=%(name)s " "in namespace=%(namespace_name)s already exists.", {'name': object_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateObject( object_name=object_name, namespace_name=namespace_name) for key in required_attributes: if key not in object_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(object_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) object_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) object = _format_object(object_values) DATA['metadef_objects'].append(object) return object @log_call def metadef_object_update(context, namespace_name, object_id, values): """Update a metadef object""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) object = metadef_object_get_by_id(context, namespace_name, object_id) if 
object['name'] != values['name']: for db_object in DATA['metadef_objects']: if (db_object['name'] == values['name'] and db_object['namespace_id'] == namespace['id']): LOG.debug("Invalid update. It would result in a duplicate " "metadata definition object with same name=%(name)s " "in namespace=%(namespace_name)s.", {'name': object['name'], 'namespace_name': namespace_name}) emsg = (_("Invalid update. It would result in a duplicate" " metadata definition object with the same" " name=%(name)s " " in namespace=%(namespace_name)s.") % {'name': object['name'], 'namespace_name': namespace_name}) raise exception.MetadefDuplicateObject(emsg) DATA['metadef_objects'].remove(object) object.update(values) object['updated_at'] = timeutils.utcnow() DATA['metadef_objects'].append(object) return object @log_call def metadef_object_delete(context, namespace_name, object_name): """Delete a metadef object""" global DATA object = metadef_object_get(context, namespace_name, object_name) DATA['metadef_objects'].remove(object) return object def metadef_object_delete_namespace_content(context, namespace_name, session=None): """Delete an object or raise if namespace or object doesn't exist.""" return _metadef_delete_namespace_content( metadef_object_get_all, 'metadef_objects', context, namespace_name) @log_call def metadef_object_count(context, namespace_name): """Get metadef object count in a namespace""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) count = 0 for object in DATA['metadef_objects']: if object['namespace_id'] == namespace['id']: count = count + 1 return count @log_call def metadef_property_count(context, namespace_name): """Get properties count in a namespace""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) count = 0 for property in DATA['metadef_properties']: if property['namespace_id'] == namespace['id']: count = count + 
1 return count @log_call def metadef_property_create(context, namespace_name, values): """Create a metadef property""" global DATA property_values = copy.deepcopy(values) property_name = property_values['name'] required_attributes = ['name'] allowed_attributes = ['name', 'description', 'json_schema', 'required'] namespace = metadef_namespace_get(context, namespace_name) for property in DATA['metadef_properties']: if (property['name'] == property_name and property['namespace_id'] == namespace['id']): LOG.debug("Can not create metadata definition property. A property" " with name=%(name)s already exists in" " namespace=%(namespace_name)s.", {'name': property_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateProperty( property_name=property_name, namespace_name=namespace_name) for key in required_attributes: if key not in property_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(property_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) property_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) property = _format_property(property_values) DATA['metadef_properties'].append(property) return property @log_call def metadef_property_update(context, namespace_name, property_id, values): """Update a metadef property""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) property = metadef_property_get_by_id(context, namespace_name, property_id) if property['name'] != values['name']: for db_property in DATA['metadef_properties']: if (db_property['name'] == values['name'] and db_property['namespace_id'] == namespace['id']): LOG.debug("Invalid update. 
It would result in a duplicate" " metadata definition property with the same" " name=%(name)s" " in namespace=%(namespace_name)s.", {'name': property['name'], 'namespace_name': namespace_name}) emsg = (_("Invalid update. It would result in a duplicate" " metadata definition property with the same" " name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': property['name'], 'namespace_name': namespace_name}) raise exception.MetadefDuplicateProperty(emsg) DATA['metadef_properties'].remove(property) property.update(values) property['updated_at'] = timeutils.utcnow() DATA['metadef_properties'].append(property) return property @log_call def metadef_property_get_all(context, namespace_name): """Get a metadef properties list""" namespace = metadef_namespace_get(context, namespace_name) properties = [] _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if property['namespace_id'] == namespace['id']: properties.append(property) return properties @log_call def metadef_property_get_by_id(context, namespace_name, property_id): """Get a metadef property""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if (property['namespace_id'] == namespace['id'] and property['id'] == property_id): return property else: msg = (_("Metadata definition property not found for id=%s") % property_id) LOG.warn(msg) raise exception.MetadefPropertyNotFound(msg) @log_call def metadef_property_get(context, namespace_name, property_name): """Get a metadef property""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for property in DATA['metadef_properties']: if (property['namespace_id'] == namespace['id'] and property['name'] == property_name): return property else: LOG.debug("No property found with name=%(name)s in" " namespace=%(namespace_name)s ", 
{'name': property_name, 'namespace_name': namespace_name}) raise exception.MetadefPropertyNotFound(namespace_name=namespace_name, property_name=property_name) @log_call def metadef_property_delete(context, namespace_name, property_name): """Delete a metadef property""" global DATA property = metadef_property_get(context, namespace_name, property_name) DATA['metadef_properties'].remove(property) return property def metadef_property_delete_namespace_content(context, namespace_name, session=None): """Delete a property or raise if it or namespace doesn't exist.""" return _metadef_delete_namespace_content( metadef_property_get_all, 'metadef_properties', context, namespace_name) @log_call def metadef_resource_type_create(context, values): """Create a metadef resource type""" global DATA resource_type_values = copy.deepcopy(values) resource_type_name = resource_type_values['name'] allowed_attrubites = ['name', 'protected'] for resource_type in DATA['metadef_resource_types']: if resource_type['name'] == resource_type_name: raise exception.Duplicate() incorrect_keys = set(resource_type_values.keys()) - set(allowed_attrubites) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) resource_type = _format_resource_type(resource_type_values) DATA['metadef_resource_types'].append(resource_type) return resource_type @log_call def metadef_resource_type_get_all(context): """List all resource types""" return DATA['metadef_resource_types'] @log_call def metadef_resource_type_get(context, resource_type_name): """Get a resource type""" try: resource_type = next(resource_type for resource_type in DATA['metadef_resource_types'] if resource_type['name'] == resource_type_name) except StopIteration: LOG.debug("No resource type found with name %s", resource_type_name) raise exception.MetadefResourceTypeNotFound( resource_type_name=resource_type_name) return resource_type @log_call def metadef_resource_type_association_create(context, namespace_name, 
values): global DATA association_values = copy.deepcopy(values) namespace = metadef_namespace_get(context, namespace_name) resource_type_name = association_values['name'] resource_type = metadef_resource_type_get(context, resource_type_name) required_attributes = ['name', 'properties_target', 'prefix'] allowed_attributes = copy.deepcopy(required_attributes) for association in DATA['metadef_namespace_resource_types']: if (association['namespace_id'] == namespace['id'] and association['resource_type'] == resource_type['id']): LOG.debug("The metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace=%(namespace_name)s, already exists.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateResourceTypeAssociation( resource_type_name=resource_type_name, namespace_name=namespace_name) for key in required_attributes: if key not in association_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(association_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) association = _format_association(namespace, resource_type, association_values) DATA['metadef_namespace_resource_types'].append(association) return association @log_call def metadef_resource_type_association_get(context, namespace_name, resource_type_name): namespace = metadef_namespace_get(context, namespace_name) resource_type = metadef_resource_type_get(context, resource_type_name) for association in DATA['metadef_namespace_resource_types']: if (association['namespace_id'] == namespace['id'] and association['resource_type'] == resource_type['id']): return association else: LOG.debug("No resource type association found associated with " "namespace %s and resource type %s", namespace_name, resource_type_name) raise exception.MetadefResourceTypeAssociationNotFound( 
resource_type_name=resource_type_name, namespace_name=namespace_name) @log_call def metadef_resource_type_association_get_all_by_namespace(context, namespace_name): namespace = metadef_namespace_get(context, namespace_name) namespace_resource_types = [] for resource_type in DATA['metadef_namespace_resource_types']: if resource_type['namespace_id'] == namespace['id']: namespace_resource_types.append(resource_type) return namespace_resource_types @log_call def metadef_resource_type_association_delete(context, namespace_name, resource_type_name): global DATA resource_type = metadef_resource_type_association_get(context, namespace_name, resource_type_name) DATA['metadef_namespace_resource_types'].remove(resource_type) return resource_type @log_call def metadef_tag_get(context, namespace_name, name): """Get a metadef tag""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id'] and tag['name'] == name: return tag else: LOG.debug("The metadata definition tag with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exception.MetadefTagNotFound(name=name, namespace_name=namespace_name) @log_call def metadef_tag_get_by_id(context, namespace_name, id): """Get a metadef tag""" namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id'] and tag['id'] == id: return tag else: msg = (_("Metadata definition tag not found for id=%s") % id) LOG.warn(msg) raise exception.MetadefTagNotFound(msg) @log_call def metadef_tag_get_all(context, namespace_name, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir=None, session=None): """Get a metadef tags list""" namespace = metadef_namespace_get(context, namespace_name) 
_check_namespace_visibility(context, namespace, namespace_name) tags = [] for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id']: tags.append(tag) return tags @log_call def metadef_tag_create(context, namespace_name, values): """Create a metadef tag""" global DATA tag_values = copy.deepcopy(values) tag_name = tag_values['name'] required_attributes = ['name'] allowed_attributes = ['name'] namespace = metadef_namespace_get(context, namespace_name) for tag in DATA['metadef_tags']: if tag['name'] == tag_name and tag['namespace_id'] == namespace['id']: LOG.debug("A metadata definition tag with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': tag_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateTag( name=tag_name, namespace_name=namespace_name) for key in required_attributes: if key not in tag_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) tag_values['namespace_id'] = namespace['id'] _check_namespace_visibility(context, namespace, namespace_name) tag = _format_tag(tag_values) DATA['metadef_tags'].append(tag) return tag @log_call def metadef_tag_create_tags(context, namespace_name, tag_list): """Create a metadef tag""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) required_attributes = ['name'] allowed_attributes = ['name'] data_tag_list = [] tag_name_list = [] for tag_value in tag_list: tag_values = copy.deepcopy(tag_value) tag_name = tag_values['name'] for key in required_attributes: if key not in tag_values: raise exception.Invalid('%s is a required attribute' % key) incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) if 
tag_name in tag_name_list: LOG.debug("A metadata definition tag with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': tag_name, 'namespace_name': namespace_name}) raise exception.MetadefDuplicateTag( name=tag_name, namespace_name=namespace_name) else: tag_name_list.append(tag_name) tag_values['namespace_id'] = namespace['id'] data_tag_list.append(_format_tag(tag_values)) DATA['metadef_tags'] = [] for tag in data_tag_list: DATA['metadef_tags'].append(tag) return data_tag_list @log_call def metadef_tag_update(context, namespace_name, id, values): """Update a metadef tag""" global DATA namespace = metadef_namespace_get(context, namespace_name) _check_namespace_visibility(context, namespace, namespace_name) tag = metadef_tag_get_by_id(context, namespace_name, id) if tag['name'] != values['name']: for db_tag in DATA['metadef_tags']: if (db_tag['name'] == values['name'] and db_tag['namespace_id'] == namespace['id']): LOG.debug("Invalid update. It would result in a duplicate" " metadata definition tag with same name=%(name)s " " in namespace=%(namespace_name)s.", {'name': tag['name'], 'namespace_name': namespace_name}) raise exception.MetadefDuplicateTag( name=tag['name'], namespace_name=namespace_name) DATA['metadef_tags'].remove(tag) tag.update(values) tag['updated_at'] = timeutils.utcnow() DATA['metadef_tags'].append(tag) return tag @log_call def metadef_tag_delete(context, namespace_name, name): """Delete a metadef tag""" global DATA tags = metadef_tag_get(context, namespace_name, name) DATA['metadef_tags'].remove(tags) return tags def metadef_tag_delete_namespace_content(context, namespace_name, session=None): """Delete an tag or raise if namespace or tag doesn't exist.""" return _metadef_delete_namespace_content( metadef_tag_get_all, 'metadef_tags', context, namespace_name) @log_call def metadef_tag_count(context, namespace_name): """Get metadef tag count in a namespace""" namespace = metadef_namespace_get(context, namespace_name) 
_check_namespace_visibility(context, namespace, namespace_name) count = 0 for tag in DATA['metadef_tags']: if tag['namespace_id'] == namespace['id']: count = count + 1 return count def _artifact_format(artifact_id, **values): dt = timeutils.utcnow() artifact = { 'id': artifact_id, 'type_name': None, 'type_version_prefix': None, 'type_version_suffix': None, 'type_version_meta': None, 'version_prefix': None, 'version_suffix': None, 'version_meta': None, 'description': None, 'visibility': None, 'state': None, 'owner': None, 'scope': None, 'tags': [], 'properties': {}, 'blobs': [], 'created_at': dt, 'updated_at': dt, 'deleted_at': None, 'deleted': False, } artifact.update(values) return artifact @log_call def artifact_create(context, values, type_name, type_version): global DATA artifact_id = values.get('id', str(uuid.uuid4())) if artifact_id in DATA['artifacts']: raise exception.Duplicate() if 'state' not in values: raise exception.Invalid('state is a required attribute') allowed_keys = set(['id', 'type_name', 'type_version', 'name', 'version', 'description', 'visibility', 'state', 'owner', 'scope']) incorrect_keys = set(values.keys()) - allowed_keys if incorrect_keys: raise exception.Invalid( 'The keys %s are not valid' % str(incorrect_keys)) artifact = _artifact_format(artifact_id, **values) DATA['artifacts'][artifact_id] = artifact return copy.deepcopy(artifact) def _artifact_get(context, artifact_id, type_name, type_version=None): try: artifact = DATA['artifacts'][artifact_id] if (artifact['type_name'] != type_name or (type_version is not None and artifact['type_version'] != type_version)): raise KeyError except KeyError: LOG.info(_LI('Could not find artifact %s'), artifact_id) raise exception.NotFound() if artifact['deleted_at']: LOG.info(_LI('Unable to get deleted image')) raise exception.NotFound() return artifact @log_call def artifact_get(context, artifact_id, type_name, type_version=None, session=None): artifact = _artifact_get(context, artifact_id, 
type_name, type_version) return copy.deepcopy(artifact) def _format_association(namespace, resource_type, association_values): association = { 'namespace_id': namespace['id'], 'resource_type': resource_type['id'], 'properties_target': None, 'prefix': None, 'created_at': timeutils.utcnow(), 'updated_at': timeutils.utcnow() } association.update(association_values) return association def _format_resource_type(values): dt = timeutils.utcnow() resource_type = { 'id': _get_metadef_id(), 'name': values['name'], 'protected': True, 'created_at': dt, 'updated_at': dt } resource_type.update(values) return resource_type def _format_property(values): property = { 'id': _get_metadef_id(), 'namespace_id': None, 'name': None, 'json_schema': None } property.update(values) return property def _format_namespace(values): dt = timeutils.utcnow() namespace = { 'id': _get_metadef_id(), 'namespace': None, 'display_name': None, 'description': None, 'visibility': 'private', 'protected': False, 'owner': None, 'created_at': dt, 'updated_at': dt } namespace.update(values) return namespace def _format_object(values): dt = timeutils.utcnow() object = { 'id': _get_metadef_id(), 'namespace_id': None, 'name': None, 'description': None, 'json_schema': None, 'required': None, 'created_at': dt, 'updated_at': dt } object.update(values) return object def _format_tag(values): dt = timeutils.utcnow() tag = { 'id': _get_metadef_id(), 'namespace_id': None, 'name': None, 'created_at': dt, 'updated_at': dt } tag.update(values) return tag def _is_namespace_visible(context, namespace): """Return true if namespace is visible in this context""" if context.is_admin: return True if namespace.get('visibility', '') == 'public': return True if namespace['owner'] is None: return True if context.owner is not None: if context.owner == namespace['owner']: return True return False def _check_namespace_visibility(context, namespace, namespace_name): if not _is_namespace_visible(context, namespace): LOG.debug("Forbidding 
request, metadata definition namespace=%s " "is not visible.", namespace_name) emsg = _("Forbidding request, metadata definition namespace=%s" " is not visible.") % namespace_name raise exception.MetadefForbidden(emsg) def _get_metadef_id(): global INDEX INDEX += 1 return INDEX glance-12.0.0/glance/db/sqlalchemy/0000775000567000056710000000000012701407204020141 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/0000775000567000056710000000000012701407204022616 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/README0000664000567000056710000000015312701407047023502 0ustar jenkinsjenkins00000000000000This is a database migration repository. More information at http://code.google.com/p/sqlalchemy-migrate/ glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/0000775000567000056710000000000012701407204024466 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql0000664000567000056710000000320412701407047031127 0ustar jenkinsjenkins00000000000000CREATE TEMPORARY TABLE images_backup ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images; DROP TABLE images; CREATE TABLE images ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, 
deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER, min_ram INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_deleted ON images (deleted); CREATE INDEX ix_images_is_public ON images (is_public); INSERT INTO images SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images_backup; DROP TABLE images_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py0000664000567000056710000002114212701407047034437 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This migration handles migrating encrypted image location values from the unquoted form to the quoted form. If 'metadata_encryption_key' is specified in the config then this migration performs the following steps for every entry in the images table: 1. Decrypt the location value with the metadata_encryption_key 2. Changes the value to its quoted form 3. Encrypts the new value with the metadata_encryption_key 4. 
Inserts the new value back into the row Fixes bug #1081043 """ import types # noqa from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six.moves.urllib.parse as urlparse import sqlalchemy from glance.common import crypt from glance.common import exception from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'glance.common.config') def upgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=True) def downgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=False) def migrate_location_credentials(migrate_engine, to_quoted): """ Migrate location credentials for encrypted swift uri's between the quoted and unquoted forms. :param migrate_engine: The configured db engine :param to_quoted: If True, migrate location credentials from unquoted to quoted form. If False, do the reverse. """ if not CONF.metadata_encryption_key: msg = _LI("'metadata_encryption_key' was not specified in the config" " file or a config file was not specified. This means that" " this migration is a NOOP.") LOG.info(msg) return meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine images_table = sqlalchemy.Table('images', meta, autoload=True) images = list(images_table.select().execute()) for image in images: try: fixed_uri = fix_uri_credentials(image['location'], to_quoted) images_table.update().where( images_table.c.id == image['id']).values( location=fixed_uri).execute() except exception.Invalid: msg = _LW("Failed to decrypt location value for image" " %(image_id)s") % {'image_id': image['id']} LOG.warn(msg) except exception.BadStoreUri as e: reason = encodeutils.exception_to_unicode(e) msg = _LE("Invalid store uri for image: %(image_id)s. 
" "Details: %(reason)s") % {'image_id': image.id, 'reason': reason} LOG.exception(msg) raise def decrypt_location(uri): return crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) def encrypt_location(uri): return crypt.urlsafe_encrypt(CONF.metadata_encryption_key, uri, 64) def fix_uri_credentials(uri, to_quoted): """ Fix the given uri's embedded credentials by round-tripping with StoreLocation. If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ if not uri: return try: decrypted_uri = decrypt_location(uri) # NOTE (ameade): If a uri is not encrypted or incorrectly encoded then we # we raise an exception. except (TypeError, ValueError) as e: raise exception.Invalid(str(e)) return legacy_parse_uri(decrypted_uri, to_quoted) def legacy_parse_uri(uri, to_quote): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence of a scheme." 
"If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj") raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % pieces.scheme) scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') # User can be account:user, in which case cred_parts[0:2] will be # the account and user. Combine them into a single username of # account:user if to_quote: if len(cred_parts) == 1: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % {'creds': creds}) raise exception.BadStoreUri(message=reason) elif len(cred_parts) == 3: user = ':'.join(cred_parts[0:2]) else: user = cred_parts[0] key = cred_parts[-1] user = user key = key else: if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) raise exception.BadStoreUri(message=reason) user, key = cred_parts user = urlparse.unquote(user) key = urlparse.unquote(key) else: user = None key = None path_parts = path.split('/') try: obj = path_parts.pop() container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} raise exception.BadStoreUri(message=reason) if auth_or_store_url.startswith('http://'): auth_or_store_url = 
auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = '' if user and key: if to_quote: quote_user = urlparse.quote(user) quote_key = urlparse.quote(key) else: quote_user = user quote_key = key credstring = '%s:%s@' % (quote_user, quote_key) auth_or_store_url = auth_or_store_url.strip('/') container = container.strip('/') obj = obj.strip('/') uri = '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, container, obj) return encrypt_location(uri) ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.pyglance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.p0000664000567000056710000003743012701407047034772 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy from sqlalchemy import inspect from sqlalchemy import (Table, Index, UniqueConstraint) from sqlalchemy.schema import (AddConstraint, DropConstraint) def _change_db2_unique_constraint(operation_type, constraint_name, *columns): constraint = migrate.UniqueConstraint(*columns, name=constraint_name) operation = getattr(constraint, operation_type) operation() def upgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine inspector = inspect(migrate_engine) metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_ns_res_types = Table('metadef_namespace_resource_types', meta, autoload=True) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) metadef_tags = Table('metadef_tags', meta, autoload=True) constraints = [('ix_namespaces_namespace', [metadef_namespaces.c.namespace]), ('ix_objects_namespace_id_name', [metadef_objects.c.namespace_id, metadef_objects.c.name]), ('ix_metadef_properties_namespace_id_name', [metadef_properties.c.namespace_id, metadef_properties.c.name])] metadef_tags_constraints = inspector.get_unique_constraints('metadef_tags') for constraint in metadef_tags_constraints: if set(constraint['column_names']) == set(['namespace_id', 'name']): constraints.append((constraint['name'], [metadef_tags.c.namespace_id, metadef_tags.c.name])) if meta.bind.name == "ibm_db_sa": # For db2, the following constraints need to be dropped first, # otherwise the index like ix_metadef_ns_res_types_namespace_id # will fail to create. These constraints will be added back at # the end. It should not affect the origional logic for other # database backends. 
for (constraint_name, cols) in constraints: _change_db2_unique_constraint('drop', constraint_name, *cols) else: Index('ix_namespaces_namespace', metadef_namespaces.c.namespace).drop() Index('ix_objects_namespace_id_name', metadef_objects.c.namespace_id, metadef_objects.c.name).drop() Index('ix_metadef_properties_namespace_id_name', metadef_properties.c.namespace_id, metadef_properties.c.name).drop() fkc = migrate.ForeignKeyConstraint([metadef_tags.c.namespace_id], [metadef_namespaces.c.id]) fkc.create() # `migrate` module removes unique constraint after adding # foreign key to the table in sqlite. # The reason of this issue is that it isn't possible to add fkc to # existing table in sqlite. Instead of this we should recreate the table # with needed fkc in the declaration. Migrate package provide us with such # possibility, but unfortunately it recreates the table without # constraints. Create unique constraint manually. if migrate_engine.name == 'sqlite' and len( inspector.get_unique_constraints('metadef_tags')) == 0: uc = migrate.UniqueConstraint(metadef_tags.c.namespace_id, metadef_tags.c.name) uc.create() if meta.bind.name != "ibm_db_sa": Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id, metadef_tags.c.name).drop() Index('ix_metadef_tags_name', metadef_tags.c.name).create() Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id, metadef_tags.c.name).create() if migrate_engine.name == 'mysql': # We need to drop some foreign keys first because unique constraints # that we want to delete depend on them. So drop the fk and recreate # it again after unique constraint deletion. 
fkc = migrate.ForeignKeyConstraint([metadef_properties.c.namespace_id], [metadef_namespaces.c.id], name='metadef_properties_ibfk_1') fkc.drop() constraint = UniqueConstraint(metadef_properties.c.namespace_id, metadef_properties.c.name, name='namespace_id') migrate_engine.execute(DropConstraint(constraint)) fkc.create() fkc = migrate.ForeignKeyConstraint([metadef_objects.c.namespace_id], [metadef_namespaces.c.id], name='metadef_objects_ibfk_1') fkc.drop() constraint = UniqueConstraint(metadef_objects.c.namespace_id, metadef_objects.c.name, name='namespace_id') migrate_engine.execute(DropConstraint(constraint)) fkc.create() constraint = UniqueConstraint(metadef_ns_res_types.c.resource_type_id, metadef_ns_res_types.c.namespace_id, name='resource_type_id') migrate_engine.execute(DropConstraint(constraint)) constraint = UniqueConstraint(metadef_namespaces.c.namespace, name='namespace') migrate_engine.execute(DropConstraint(constraint)) constraint = UniqueConstraint(metadef_resource_types.c.name, name='name') migrate_engine.execute(DropConstraint(constraint)) if migrate_engine.name == 'postgresql': met_obj_index_name = ( inspector.get_unique_constraints('metadef_objects')[0]['name']) constraint = UniqueConstraint( metadef_objects.c.namespace_id, metadef_objects.c.name, name=met_obj_index_name) migrate_engine.execute(DropConstraint(constraint)) met_prop_index_name = ( inspector.get_unique_constraints('metadef_properties')[0]['name']) constraint = UniqueConstraint( metadef_properties.c.namespace_id, metadef_properties.c.name, name=met_prop_index_name) migrate_engine.execute(DropConstraint(constraint)) metadef_namespaces_name = ( inspector.get_unique_constraints( 'metadef_namespaces')[0]['name']) constraint = UniqueConstraint( metadef_namespaces.c.namespace, name=metadef_namespaces_name) migrate_engine.execute(DropConstraint(constraint)) metadef_resource_types_name = (inspector.get_unique_constraints( 'metadef_resource_types')[0]['name']) constraint = UniqueConstraint( 
metadef_resource_types.c.name, name=metadef_resource_types_name) migrate_engine.execute(DropConstraint(constraint)) constraint = UniqueConstraint( metadef_tags.c.namespace_id, metadef_tags.c.name, name='metadef_tags_namespace_id_name_key') migrate_engine.execute(DropConstraint(constraint)) Index('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.c.namespace_id).create() Index('ix_metadef_namespaces_namespace', metadef_namespaces.c.namespace).create() Index('ix_metadef_namespaces_owner', metadef_namespaces.c.owner).create() Index('ix_metadef_objects_name', metadef_objects.c.name).create() Index('ix_metadef_objects_namespace_id', metadef_objects.c.namespace_id).create() Index('ix_metadef_properties_name', metadef_properties.c.name).create() Index('ix_metadef_properties_namespace_id', metadef_properties.c.namespace_id).create() if meta.bind.name == "ibm_db_sa": # For db2, add these constraints back. It should not affect the # origional logic for other database backends. for (constraint_name, cols) in constraints: _change_db2_unique_constraint('create', constraint_name, *cols) def downgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine inspector = inspect(migrate_engine) metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_ns_res_types = Table('metadef_namespace_resource_types', meta, autoload=True) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) metadef_tags = Table('metadef_tags', meta, autoload=True) constraints = [('ix_namespaces_namespace', [metadef_namespaces.c.namespace]), ('ix_objects_namespace_id_name', [metadef_objects.c.namespace_id, metadef_objects.c.name]), ('ix_metadef_properties_namespace_id_name', [metadef_properties.c.namespace_id, metadef_properties.c.name])] metadef_tags_constraints = 
inspector.get_unique_constraints('metadef_tags') for constraint in metadef_tags_constraints: if set(constraint['column_names']) == set(['namespace_id', 'name']): constraints.append((constraint['name'], [metadef_tags.c.namespace_id, metadef_tags.c.name])) if meta.bind.name == "ibm_db_sa": # For db2, the following constraints need to be dropped first, # otherwise the index like ix_metadef_ns_res_types_namespace_id # will fail to drop. These constraints will be added back at # the end. It should not affect the origional logic for other # database backends. for (constraint_name, cols) in constraints: _change_db2_unique_constraint('drop', constraint_name, *cols) else: Index('ix_namespaces_namespace', metadef_namespaces.c.namespace).create() Index('ix_objects_namespace_id_name', metadef_objects.c.namespace_id, metadef_objects.c.name).create() Index('ix_metadef_properties_namespace_id_name', metadef_properties.c.namespace_id, metadef_properties.c.name).create() Index('ix_metadef_tags_name', metadef_tags.c.name).drop() Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id, metadef_tags.c.name).drop() if migrate_engine.name != 'sqlite': fkc = migrate.ForeignKeyConstraint([metadef_tags.c.namespace_id], [metadef_namespaces.c.id]) fkc.drop() if meta.bind.name != "ibm_db_sa": # This index would not be created when it is db2 backend. 
Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id, metadef_tags.c.name).create() else: # NOTE(ochuprykov): fkc can't be dropped via `migrate` in sqlite,so it # is necessary to recreate table manually and populate it with data temp = Table('temp_', meta, *( [c.copy() for c in metadef_tags.columns])) temp.create() migrate_engine.execute('insert into temp_ select * from metadef_tags') metadef_tags.drop() migrate_engine.execute('alter table temp_ rename to metadef_tags') # Refresh old metadata for this table meta = sqlalchemy.MetaData() meta.bind = migrate_engine metadef_tags = Table('metadef_tags', meta, autoload=True) Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id, metadef_tags.c.name).create() uc = migrate.UniqueConstraint(metadef_tags.c.namespace_id, metadef_tags.c.name) uc.create() if migrate_engine.name == 'mysql': constraint = UniqueConstraint(metadef_properties.c.namespace_id, metadef_properties.c.name, name='namespace_id') migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint(metadef_objects.c.namespace_id, metadef_objects.c.name, name='namespace_id') migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint(metadef_ns_res_types.c.resource_type_id, metadef_ns_res_types.c.namespace_id, name='resource_type_id') migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint(metadef_namespaces.c.namespace, name='namespace') migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint(metadef_resource_types.c.name, name='name') migrate_engine.execute(AddConstraint(constraint)) if migrate_engine.name == 'postgresql': constraint = UniqueConstraint( metadef_objects.c.namespace_id, metadef_objects.c.name) migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint( metadef_properties.c.namespace_id, metadef_properties.c.name) migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint( metadef_namespaces.c.namespace) 
migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint( metadef_resource_types.c.name) migrate_engine.execute(AddConstraint(constraint)) constraint = UniqueConstraint( metadef_tags.c.namespace_id, metadef_tags.c.name, name='metadef_tags_namespace_id_name_key') migrate_engine.execute(AddConstraint(constraint)) if migrate_engine.name == 'mysql': fkc = migrate.ForeignKeyConstraint( [metadef_ns_res_types.c.resource_type_id], [metadef_namespaces.c.id], name='metadef_namespace_resource_types_ibfk_2') fkc.drop() Index('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.c.namespace_id).drop() fkc.create() else: Index('ix_metadef_ns_res_types_namespace_id', metadef_ns_res_types.c.namespace_id).drop() Index('ix_metadef_namespaces_namespace', metadef_namespaces.c.namespace).drop() Index('ix_metadef_namespaces_owner', metadef_namespaces.c.owner).drop() Index('ix_metadef_objects_name', metadef_objects.c.name).drop() Index('ix_metadef_objects_namespace_id', metadef_objects.c.namespace_id).drop() Index('ix_metadef_properties_name', metadef_properties.c.name).drop() Index('ix_metadef_properties_namespace_id', metadef_properties.c.namespace_id).drop() if meta.bind.name == "ibm_db_sa": # For db2, add these constraints back. It should not affect the # origional logic for other database backends. for (constraint_name, cols) in constraints: _change_db2_unique_constraint('create', constraint_name, *cols) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py0000664000567000056710000000217612701407047030434 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Index INDEX_NAME = 'checksum_image_idx' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = Table('images', meta, autoload=True) index = Index(INDEX_NAME, images.c.checksum) index.create(migrate_engine) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = Table('images', meta, autoload=True) index = Index(INDEX_NAME, images.c.checksum) index.drop(migrate_engine) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py0000664000567000056710000001506012701407047032362 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import encodeutils import six.moves.urllib.parse as urlparse import sqlalchemy from glance.common import exception from glance.i18n import _, _LE LOG = logging.getLogger(__name__) def upgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=True) def downgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=False) def migrate_location_credentials(migrate_engine, to_quoted): """ Migrate location credentials for swift uri's between the quoted and unquoted forms. :param migrate_engine: The configured db engine :param to_quoted: If True, migrate location credentials from unquoted to quoted form. If False, do the reverse. """ meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine images_table = sqlalchemy.Table('images', meta, autoload=True) images = list(images_table.select(images_table.c.location.startswith( 'swift')).execute()) for image in images: try: fixed_uri = legacy_parse_uri(image['location'], to_quoted) images_table.update().where( images_table.c.id == image['id']).values( location=fixed_uri).execute() except exception.BadStoreUri as e: reason = encodeutils.exception_to_unicode(e) msg = _LE("Invalid store uri for image: %(image_id)s. " "Details: %(reason)s") % {'image_id': image.id, 'reason': reason} LOG.exception(msg) raise def legacy_parse_uri(uri, to_quote): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. 
If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence of a scheme." "If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj") raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % pieces.scheme) scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') # User can be account:user, in which case cred_parts[0:2] will be # the account and user. 
Combine them into a single username of # account:user if to_quote: if len(cred_parts) == 1: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % {'creds': creds}) raise exception.BadStoreUri(message=reason) elif len(cred_parts) == 3: user = ':'.join(cred_parts[0:2]) else: user = cred_parts[0] key = cred_parts[-1] user = user key = key else: if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) raise exception.BadStoreUri(message=reason) user, key = cred_parts user = urlparse.unquote(user) key = urlparse.unquote(key) else: user = None key = None path_parts = path.split('/') try: obj = path_parts.pop() container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} raise exception.BadStoreUri(message=reason) if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = '' if user and key: if to_quote: quote_user = urlparse.quote(user) quote_key = urlparse.quote(key) else: quote_user = user quote_key = key credstring = '%s:%s@' % (quote_user, quote_key) auth_or_store_url = auth_or_store_url.strip('/') container = container.strip('/') obj = obj.strip('/') return '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, container, obj) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py0000664000567000056710000000676512701407047031246 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from migrate.changeset import UniqueConstraint from oslo_db import exception as db_exception from sqlalchemy import and_, func, orm from sqlalchemy import MetaData, Table from sqlalchemy.exc import OperationalError, ProgrammingError NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key' ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key') def upgrade(migrate_engine): image_members = _get_image_members_table(migrate_engine) if migrate_engine.name in ('mysql', 'postgresql'): try: UniqueConstraint('image_id', name=_get_original_keyname(migrate_engine.name), table=image_members).drop() except (OperationalError, ProgrammingError, db_exception.DBError): UniqueConstraint('image_id', name=_infer_original_keyname(image_members), table=image_members).drop() UniqueConstraint('image_id', 'member', 'deleted_at', name=NEW_KEYNAME, table=image_members).create() def downgrade(migrate_engine): image_members = _get_image_members_table(migrate_engine) if migrate_engine.name in ('mysql', 'postgresql'): _sanitize(migrate_engine, image_members) UniqueConstraint('image_id', name=NEW_KEYNAME, table=image_members).drop() UniqueConstraint('image_id', 'member', name=_get_original_keyname(migrate_engine.name), table=image_members).create() def _get_image_members_table(migrate_engine): meta = MetaData() meta.bind = migrate_engine return Table('image_members', meta, autoload=True) def _get_original_keyname(db): return {'mysql': 'image_id', 'postgresql': 'image_members_image_id_member_key'}[db] def _infer_original_keyname(table): for i in table.indexes: if 
ORIGINAL_KEYNAME_RE.match(i.name): return i.name def _sanitize(migrate_engine, table): """ Avoid possible integrity error by removing deleted rows to accommodate less restrictive uniqueness constraint """ session = orm.sessionmaker(bind=migrate_engine)() # find the image_member rows containing duplicate combinations # of image_id and member qry = (session.query(table.c.image_id, table.c.member) .group_by(table.c.image_id, table.c.member) .having(func.count() > 1)) for image_id, member in qry: # only remove duplicate rows already marked deleted d = table.delete().where(and_(table.c.deleted == True, table.c.image_id == image_id, table.c.member == member)) d.execute() session.close() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py0000664000567000056710000000740412701407047030746 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, BigInteger, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ Returns the Table object for the images table that corresponds to the images table definition of this version. 
""" images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('disk_format', String(20)), Column('container_format', String(20)), Column('size', BigInteger()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), mysql_engine='InnoDB', extend_existing=True) return images def get_image_properties_table(meta): """ No changes to the image properties table from 002... """ (define_image_properties_table,) = from_migration_import( '002_add_image_properties_table', ['define_image_properties_table']) image_properties = define_image_properties_table(meta) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # No changes to SQLite stores are necessary, since # there is no BIG INTEGER type in SQLite. Unfortunately, # running the Python 005_size_big_integer.py migration script # on a SQLite datastore results in an error in the sa-migrate # code that does the workarounds for SQLite not having # ALTER TABLE MODIFY COLUMN ability dialect = migrate_engine.url.get_dialect().name if not dialect.startswith('sqlite'): (get_images_table,) = from_migration_import( '003_add_disk_format', ['get_images_table']) images = get_images_table(meta) images.columns['size'].alter(type=BigInteger()) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # No changes to SQLite stores are necessary, since # there is no BIG INTEGER type in SQLite. 
Unfortunately, # running the Python 005_size_big_integer.py migration script # on a SQLite datastore results in an error in the sa-migrate # code that does the workarounds for SQLite not having # ALTER TABLE MODIFY COLUMN ability dialect = migrate_engine.url.get_dialect().name if not dialect.startswith('sqlite'): images = get_images_table(meta) images.columns['size'].alter(type=Integer()) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py0000664000567000056710000000224712701407047032763 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from glance.db.sqlalchemy.migrate_repo import schema def get_images_table(meta): return sqlalchemy.Table('images', meta, autoload=True) def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) images_table = get_images_table(meta) images_table.columns['location'].drop() def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) images_table = get_images_table(meta) location = sqlalchemy.Column('location', schema.Text()) location.create(images_table) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py0000664000567000056710000000135612701407047027722 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def upgrade(migrate_engine): pass def downgrade(migration_engine): pass glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py0000664000567000056710000001775112701407047031216 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy from sqlalchemy.schema import ( Column, ForeignKey, Index, MetaData, Table, UniqueConstraint) # noqa from glance.common import timeutils from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, create_tables, drop_tables) # noqa RESOURCE_TYPES = [u'OS::Glance::Image', u'OS::Cinder::Volume', u'OS::Nova::Flavor', u'OS::Nova::Aggregate', u'OS::Nova::Server'] def _get_metadef_resource_types_table(meta): return sqlalchemy.Table('metadef_resource_types', meta, autoload=True) def _populate_resource_types(resource_types_table): now = timeutils.utcnow() for resource_type in RESOURCE_TYPES: values = { 'name': resource_type, 'protected': True, 'created_at': now, 'updated_at': now } resource_types_table.insert(values=values).execute() def define_metadef_namespaces_table(meta): # NOTE: For DB2 if UniqueConstraint is used when creating a table # an index will automatically be created. So, for DB2 specify the # index name up front. If not DB2 then create the Index. 
_constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': _constr_kwargs['name'] = 'ix_namespaces_namespace' namespaces = Table('metadef_namespaces', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('namespace', String(80), nullable=False), Column('display_name', String(80)), Column('description', Text()), Column('visibility', String(32)), Column('protected', Boolean()), Column('owner', String(255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('namespace', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_namespaces_namespace', namespaces.c.namespace) return namespaces def define_metadef_objects_table(meta): _constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': _constr_kwargs['name'] = 'ix_objects_namespace_id_name' objects = Table('metadef_objects', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('namespace_id', Integer(), ForeignKey('metadef_namespaces.id'), nullable=False), Column('name', String(80), nullable=False), Column('description', Text()), Column('required', Text()), Column('schema', Text(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('namespace_id', 'name', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_objects_namespace_id_name', objects.c.namespace_id, objects.c.name) return objects def define_metadef_properties_table(meta): _constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': _constr_kwargs['name'] = 'ix_metadef_properties_namespace_id_name' metadef_properties = Table( 'metadef_properties', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('namespace_id', Integer(), ForeignKey('metadef_namespaces.id'), nullable=False), Column('name', String(80), nullable=False), Column('schema', 
Text(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('namespace_id', 'name', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_metadef_properties_namespace_id_name', metadef_properties.c.namespace_id, metadef_properties.c.name) return metadef_properties def define_metadef_resource_types_table(meta): _constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': _constr_kwargs['name'] = 'ix_metadef_resource_types_name' metadef_res_types = Table( 'metadef_resource_types', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(80), nullable=False), Column('protected', Boolean(), nullable=False, default=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('name', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_metadef_resource_types_name', metadef_res_types.c.name) return metadef_res_types def define_metadef_namespace_resource_types_table(meta): _constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': _constr_kwargs['name'] = 'ix_metadef_ns_res_types_res_type_id_ns_id' metadef_associations = Table( 'metadef_namespace_resource_types', meta, Column('resource_type_id', Integer(), ForeignKey('metadef_resource_types.id'), primary_key=True, nullable=False), Column('namespace_id', Integer(), ForeignKey('metadef_namespaces.id'), primary_key=True, nullable=False), Column('properties_target', String(80)), Column('prefix', String(80)), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('resource_type_id', 'namespace_id', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_metadef_ns_res_types_res_type_id_ns_id', 
metadef_associations.c.resource_type_id, metadef_associations.c.namespace_id) return metadef_associations def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_metadef_namespaces_table(meta), define_metadef_objects_table(meta), define_metadef_properties_table(meta), define_metadef_resource_types_table(meta), define_metadef_namespace_resource_types_table(meta)] create_tables(tables) resource_types_table = _get_metadef_resource_types_table(meta) _populate_resource_types(resource_types_table) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_metadef_objects_table(meta), define_metadef_properties_table(meta), define_metadef_namespace_resource_types_table(meta), define_metadef_resource_types_table(meta), define_metadef_namespaces_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py0000664000567000056710000000505712701407047034674 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import pickle import sqlalchemy from sqlalchemy import MetaData, Table, Column # noqa from glance.db.sqlalchemy import models def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) image_locations = Table('image_locations', meta, autoload=True) new_meta_data = Column('storage_meta_data', models.JSONEncodedDict, default={}) new_meta_data.create(image_locations) noe = pickle.dumps({}) s = sqlalchemy.sql.select([image_locations]).where( image_locations.c.meta_data != noe) conn = migrate_engine.connect() res = conn.execute(s) for row in res: meta_data = row['meta_data'] x = pickle.loads(meta_data) if x != {}: stmt = image_locations.update().where( image_locations.c.id == row['id']).values(storage_meta_data=x) conn.execute(stmt) conn.close() image_locations.columns['meta_data'].drop() image_locations.columns['storage_meta_data'].alter(name='meta_data') def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) image_locations = Table('image_locations', meta, autoload=True) old_meta_data = Column('old_meta_data', sqlalchemy.PickleType(), default={}) old_meta_data.create(image_locations) noj = json.dumps({}) s = sqlalchemy.sql.select([image_locations]).where( image_locations.c.meta_data != noj) conn = migrate_engine.connect() res = conn.execute(s) for row in res: x = row['meta_data'] meta_data = json.loads(x) if meta_data != {}: stmt = image_locations.update().where( image_locations.c.id == row['id']).values( old_meta_data=meta_data) conn.execute(stmt) conn.close() image_locations.columns['meta_data'].drop() image_locations.columns['old_meta_data'].alter(name='meta_data') glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql0000664000567000056710000000303712701407047031134 0ustar jenkinsjenkins00000000000000-- Make changes to the base images table CREATE TEMPORARY TABLE images_backup ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN 
NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted FROM images; DROP TABLE images; CREATE TABLE images ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, type VARCHAR(30), status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_deleted ON images (deleted); CREATE INDEX ix_images_is_public ON images (is_public); INSERT INTO images (id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted) SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted FROM images_backup; DROP TABLE images_backup; -- Re-insert the type values from the temp table UPDATE images SET type = (SELECT value FROM image_properties WHERE image_id = images.id AND key = 'type') WHERE EXISTS (SELECT * FROM image_properties WHERE image_id = images.id AND key = 'type'); -- Remove the type properties from the image_properties table DELETE FROM image_properties WHERE key = 'type'; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py0000664000567000056710000000203512701407047033366 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Index CREATED_AT_INDEX = 'created_at_image_idx' UPDATED_AT_INDEX = 'updated_at_image_idx' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = Table('images', meta, autoload=True) created_index = Index(CREATED_AT_INDEX, images.c.created_at) created_index.create(migrate_engine) updated_index = Index(UPDATED_AT_INDEX, images.c.updated_at) updated_index.create(migrate_engine) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py0000664000567000056710000000135612701407047027720 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def upgrade(migrate_engine): pass def downgrade(migration_engine): pass glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_downgrade.sql0000664000567000056710000000773612701407047031155 0ustar jenkinsjenkins00000000000000CREATE TEMPORARY TABLE images_backup ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER, min_ram INTEGER, protected BOOLEAN, virtual_size INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, virtual_size, protected FROM images; DROP TABLE images; CREATE TABLE images ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, protected BOOLEAN, virtual_size INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX owner_image_idx ON images (owner); CREATE INDEX checksum_image_idx ON images (checksum); INSERT INTO images SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size FROM images_backup; DROP TABLE images_backup; CREATE TEMPORARY TABLE image_members_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME 
NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, status VARCHAR(20), PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) ); INSERT INTO image_members_backup SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status FROM image_members; DROP TABLE image_members; CREATE TABLE image_members ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, status VARCHAR(20), PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) ); INSERT INTO image_members SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status FROM image_members_backup; DROP TABLE image_members_backup; CREATE TEMPORARY TABLE image_properties_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO image_properties_backup SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted FROM image_properties; DROP TABLE image_properties; CREATE TABLE image_properties ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), UNIQUE (image_id, name), FOREIGN KEY(image_id) REFERENCES images (id) ); CREATE INDEX ix_image_properties_name ON image_properties (name); INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted) SELECT id, image_id, name, value, created_at, updated_at, 
deleted_at, deleted FROM image_properties_backup; DROP TABLE image_properties_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/__init__.py0000664000567000056710000000000012701407047026572 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py0000664000567000056710000002417712701407047031403 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table) from glance.db.sqlalchemy.migrate_repo.schema import ( BigInteger, Boolean, DateTime, Integer, Numeric, String, Text, create_tables) # noqa def define_artifacts_table(meta): artifacts = Table('artifacts', meta, Column('id', String(36), primary_key=True, nullable=False), Column('name', String(255), nullable=False), Column('type_name', String(255), nullable=False), Column('type_version_prefix', BigInteger(), nullable=False), Column('type_version_suffix', String(255)), Column('type_version_meta', String(255)), Column('version_prefix', BigInteger(), nullable=False), Column('version_suffix', String(255)), Column('version_meta', String(255)), Column('description', Text()), Column('visibility', String(32), nullable=False), Column('state', String(32), nullable=False), Column('owner', String(255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('deleted_at', 
DateTime()), Column('published_at', DateTime()), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_name_and_version', artifacts.c.name, artifacts.c.version_prefix, artifacts.c.version_suffix) Index('ix_artifact_type', artifacts.c.type_name, artifacts.c.type_version_prefix, artifacts.c.type_version_suffix) Index('ix_artifact_state', artifacts.c.state) Index('ix_artifact_owner', artifacts.c.owner) Index('ix_artifact_visibility', artifacts.c.visibility) return artifacts def define_artifact_tags_table(meta): artifact_tags = Table('artifact_tags', meta, Column('id', String(36), primary_key=True, nullable=False), Column('artifact_id', String(36), ForeignKey('artifacts.id'), nullable=False), Column('value', String(255), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_tags_artifact_id', artifact_tags.c.artifact_id) Index('ix_artifact_tags_artifact_id_tag_value', artifact_tags.c.artifact_id, artifact_tags.c.value) return artifact_tags def define_artifact_dependencies_table(meta): artifact_dependencies = Table('artifact_dependencies', meta, Column('id', String(36), primary_key=True, nullable=False), Column('artifact_source', String(36), ForeignKey('artifacts.id'), nullable=False), Column('artifact_dest', String(36), ForeignKey('artifacts.id'), nullable=False), Column('artifact_origin', String(36), ForeignKey('artifacts.id'), nullable=False), Column('is_direct', Boolean(), nullable=False), Column('position', Integer()), Column('name', String(36)), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_dependencies_source_id', artifact_dependencies.c.artifact_source) Index('ix_artifact_dependencies_dest_id', artifact_dependencies.c.artifact_dest), 
Index('ix_artifact_dependencies_origin_id', artifact_dependencies.c.artifact_origin) Index('ix_artifact_dependencies_direct_dependencies', artifact_dependencies.c.artifact_source, artifact_dependencies.c.is_direct) return artifact_dependencies def define_artifact_blobs_table(meta): artifact_blobs = Table('artifact_blobs', meta, Column('id', String(36), primary_key=True, nullable=False), Column('artifact_id', String(36), ForeignKey('artifacts.id'), nullable=False), Column('size', BigInteger(), nullable=False), Column('checksum', String(32)), Column('name', String(255), nullable=False), Column('item_key', String(329)), Column('position', Integer()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_blobs_artifact_id', artifact_blobs.c.artifact_id) Index('ix_artifact_blobs_name', artifact_blobs.c.name) return artifact_blobs def define_artifact_properties_table(meta): artifact_properties = Table('artifact_properties', meta, Column('id', String(36), primary_key=True, nullable=False), Column('artifact_id', String(36), ForeignKey('artifacts.id'), nullable=False), Column('name', String(255), nullable=False), Column('string_value', String(255)), Column('int_value', Integer()), Column('numeric_value', Numeric()), Column('bool_value', Boolean()), Column('text_value', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('position', Integer()), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_properties_artifact_id', artifact_properties.c.artifact_id) Index('ix_artifact_properties_name', artifact_properties.c.name) return artifact_properties def define_artifact_blob_locations_table(meta): artifact_blob_locations = Table('artifact_blob_locations', meta, Column('id', String(36), primary_key=True, nullable=False), Column('blob_id', String(36), 
ForeignKey('artifact_blobs.id'), nullable=False), Column('value', Text(), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime(), nullable=False), Column('position', Integer()), Column('status', String(36), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_artifact_blob_locations_blob_id', artifact_blob_locations.c.blob_id) return artifact_blob_locations def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_artifacts_table(meta), define_artifact_tags_table(meta), define_artifact_properties_table(meta), define_artifact_blobs_table(meta), define_artifact_blob_locations_table(meta), define_artifact_dependencies_table(meta)] create_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py0000664000567000056710000000135612701407047027721 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def upgrade(migrate_engine): pass def downgrade(migration_engine): pass glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py0000664000567000056710000000216512701407047027763 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Index INDEX_NAME = 'owner_image_idx' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = Table('images', meta, autoload=True) index = Index(INDEX_NAME, images.c.owner) index.create(migrate_engine) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = Table('images', meta, autoload=True) index = Index(INDEX_NAME, images.c.owner) index.drop(migrate_engine) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py0000664000567000056710000000212112701407047034015 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy meta = sqlalchemy.MetaData() def upgrade(migrate_engine): meta.bind = migrate_engine images = sqlalchemy.Table('images', meta, autoload=True) images.c.min_disk.alter(nullable=False) images.c.min_ram.alter(nullable=False) def downgrade(migrate_engine): meta.bind = migrate_engine images = sqlalchemy.Table('images', meta, autoload=True) images.c.min_disk.alter(nullable=True) images.c.min_ram.alter(nullable=True) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py0000664000567000056710000001306312701407047030544 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ Returns the Table object for the images table that corresponds to the images table definition of this version. 
""" images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('disk_format', String(20)), Column('container_format', String(20)), Column('size', Integer()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), mysql_engine='InnoDB', extend_existing=True) return images def get_image_properties_table(meta): """ No changes to the image properties table from 002... """ (define_image_properties_table,) = from_migration_import( '002_add_image_properties_table', ['define_image_properties_table']) image_properties = define_image_properties_table(meta) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine (define_images_table,) = from_migration_import( '001_add_images_table', ['define_images_table']) (define_image_properties_table,) = from_migration_import( '002_add_image_properties_table', ['define_image_properties_table']) conn = migrate_engine.connect() images = define_images_table(meta) image_properties = define_image_properties_table(meta) # Steps to take, in this order: # 1) Move the existing type column from Image into # ImageProperty for all image records that have a non-NULL # type column # 2) Drop the type column in images # 3) Add the new columns to images # The below wackiness correlates to the following ANSI SQL: # SELECT images.* FROM images # LEFT JOIN image_properties # ON images.id = image_properties.image_id # AND image_properties.key = 'type' # WHERE image_properties.image_id IS NULL # AND images.type IS NOT NULL # # which returns all the images that have a type set # but that DO NOT yet have an image_property record # with key of type. 
from_stmt = [ images.outerjoin(image_properties, and_(images.c.id == image_properties.c.image_id, image_properties.c.key == 'type')) ] and_stmt = and_(image_properties.c.image_id == None, images.c.type != None) sel = select([images], from_obj=from_stmt).where(and_stmt) image_records = conn.execute(sel).fetchall() property_insert = image_properties.insert() for record in image_records: conn.execute(property_insert, image_id=record.id, key='type', created_at=record.created_at, deleted=False, value=record.type) conn.close() disk_format = Column('disk_format', String(20)) disk_format.create(images) container_format = Column('container_format', String(20)) container_format.create(images) images.columns['type'].drop() def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine # Steps to take, in this order: # 1) Add type column back to Image # 2) Move the existing type properties from ImageProperty into # Image.type # 3) Drop the disk_format and container_format columns in Image conn = migrate_engine.connect() images = get_images_table(meta) image_properties = get_image_properties_table(meta) type_col = Column('type', String(30)) type_col.create(images) sel = select([image_properties]).where(image_properties.c.key == 'type') type_property_records = conn.execute(sel).fetchall() for record in type_property_records: upd = images.update().where( images.c.id == record.image_id).values(type=record.value) conn.execute(upd) dlt = image_properties.delete().where( image_properties.c.image_id == record.image_id) conn.execute(dlt) conn.close() images.columns['disk_format'].drop() images.columns['container_format'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py0000664000567000056710000000670212701407047032214 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, create_tables, drop_tables, from_migration_import) # noqa def get_images_table(meta): """ No changes to the images table from 007... """ (get_images_table,) = from_migration_import( '007_add_owner', ['get_images_table']) images = get_images_table(meta) return images def get_image_properties_table(meta): """ No changes to the image properties table from 007... 
""" (get_image_properties_table,) = from_migration_import( '007_add_owner', ['get_image_properties_table']) image_properties = get_image_properties_table(meta) return image_properties def get_image_members_table(meta): images = get_images_table(meta) # noqa image_members = Table('image_members', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('image_id', Integer(), ForeignKey('images.id'), nullable=False, index=True), Column('member', String(255), nullable=False), Column('can_share', Boolean(), nullable=False, default=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), UniqueConstraint('image_id', 'member'), mysql_charset='utf8', mysql_engine='InnoDB', extend_existing=True) # DB2: an index has already been created for the UniqueConstraint option # specified on the Table() statement above. if meta.bind.name != "ibm_db_sa": Index('ix_image_members_image_id_member', image_members.c.image_id, image_members.c.member) return image_members def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [get_image_members_table(meta)] create_tables(tables) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [get_image_members_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py0000664000567000056710000000547612701407047030056 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ Returns the Table object for the images table that corresponds to the images table definition of this version. """ images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('disk_format', String(20)), Column('container_format', String(20)), Column('size', Integer()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), Column('checksum', String(32)), mysql_engine='InnoDB', extend_existing=True) return images def get_image_properties_table(meta): """ No changes to the image properties table from 002... 
""" (define_image_properties_table,) = from_migration_import( '002_add_image_properties_table', ['define_image_properties_table']) image_properties = define_image_properties_table(meta) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) checksum = Column('checksum', String(32)) checksum.create(images) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) images.columns['checksum'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py0000664000567000056710000000421212701407047032040 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import ( Column, Index, MetaData, Table, UniqueConstraint) # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( DateTime, Integer, String, create_tables, drop_tables) # noqa def define_metadef_tags_table(meta): _constr_kwargs = {} metadef_tags = Table('metadef_tags', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('namespace_id', Integer(), nullable=False), Column('name', String(80), nullable=False), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), UniqueConstraint('namespace_id', 'name', **_constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=False) if meta.bind.name != 'ibm_db_sa': Index('ix_tags_namespace_id_name', metadef_tags.c.namespace_id, metadef_tags.c.name) return metadef_tags def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_metadef_tags_table(meta)] create_tables(tables) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_metadef_tags_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py0000664000567000056710000000275212701407047034213 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy from glance.db.sqlalchemy.migrate_repo import schema def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine image_locations_table = sqlalchemy.Table('image_locations', meta, autoload=True) meta_data = sqlalchemy.Column('meta_data', schema.PickleType(), default={}) meta_data.create(image_locations_table) def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine image_locations_table = sqlalchemy.Table('image_locations', meta, autoload=True) image_locations_table.columns['meta_data'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql0000664000567000056710000000225112701407047031134 0ustar jenkinsjenkins00000000000000-- -- This is necessary because SQLite does not support -- RENAME INDEX or ALTER TABLE CHANGE COLUMN. -- CREATE TEMPORARY TABLE image_properties_backup ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, key VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO image_properties_backup SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted FROM image_properties; DROP TABLE image_properties; CREATE TABLE image_properties ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, key VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), UNIQUE (image_id, key), FOREIGN KEY(image_id) REFERENCES images (id) ); CREATE INDEX ix_image_properties_key ON image_properties (key); INSERT INTO image_properties (id, image_id, key, value, created_at, updated_at, deleted_at, deleted) SELECT id, image_id, key, value, created_at, updated_at, deleted_at, deleted FROM image_properties_backup; DROP TABLE image_properties_backup; 
glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py0000664000567000056710000000211712701407047032260 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Column, String meta = MetaData() status = Column('status', String(20), default="pending") def upgrade(migrate_engine): meta.bind = migrate_engine image_members = Table('image_members', meta, autoload=True) image_members.create_column(status) def downgrade(migrate_engine): meta.bind = migrate_engine image_members = Table('image_members', meta, autoload=True) image_members.drop_column(status) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py0000664000567000056710000000220512701407047030762 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy def upgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine images = sqlalchemy.Table('images', meta, autoload=True) virtual_size = sqlalchemy.Column('virtual_size', sqlalchemy.BigInteger) images.create_column(virtual_size) def downgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine images = sqlalchemy.Table('images', meta, autoload=True) images.columns['virtual_size'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py0000664000567000056710000000253312701407047033454 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import MetaData from sqlalchemy.schema import Table def upgrade(migrate_engine): meta = MetaData(bind=migrate_engine) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_objects.c.schema.alter(name='json_schema') metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_properties.c.schema.alter(name='json_schema') def downgrade(migrate_engine): meta = MetaData(bind=migrate_engine) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_objects.c.json_schema.alter(name='schema') metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_properties.c.json_schema.alter(name='schema') glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py0000664000567000056710000000373112701407047032307 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy def get_images_table(meta): return sqlalchemy.Table('images', meta, autoload=True) def get_image_locations_table(meta): return sqlalchemy.Table('image_locations', meta, autoload=True) def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) images_table = get_images_table(meta) image_locations_table = get_image_locations_table(meta) image_records = images_table.select().execute().fetchall() for image in image_records: if image.location is not None: values = { 'image_id': image.id, 'value': image.location, 'created_at': image.created_at, 'updated_at': image.updated_at, 'deleted': image.deleted, 'deleted_at': image.deleted_at, } image_locations_table.insert(values=values).execute() def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) images_table = get_images_table(meta) image_locations_table = get_image_locations_table(meta) image_records = image_locations_table.select().execute().fetchall() for image_location in image_records: images_table.update( values={'location': image_location.value}).where( images_table.c.id == image_location.image_id).execute() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql0000664000567000056710000000322212701407047031131 0ustar jenkinsjenkins00000000000000-- -- This is necessary because sqlalchemy has various bugs preventing -- downgrades from working correctly. 
-- CREATE TEMPORARY TABLE images_backup ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images; DROP TABLE images; CREATE TABLE images ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_is_public ON images (is_public); CREATE INDEX ix_images_deleted ON images (deleted); INSERT INTO images SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images_backup; DROP TABLE images_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py0000664000567000056710000000657212701407047032755 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import ( Column, ForeignKey, Index, MetaData, Table, UniqueConstraint) from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, create_tables, drop_tables, from_migration_import) # noqa def define_image_properties_table(meta): (define_images_table,) = from_migration_import( '001_add_images_table', ['define_images_table']) images = define_images_table(meta) # noqa # NOTE(dperaza) DB2: specify the UniqueConstraint option when creating the # table will cause an index being created to specify the index # name and skip the step of creating another index with the same columns. # The index name is needed so it can be dropped and re-created later on. 
constr_kwargs = {} if meta.bind.name == 'ibm_db_sa': constr_kwargs['name'] = 'ix_image_properties_image_id_key' image_properties = Table('image_properties', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('image_id', Integer(), ForeignKey('images.id'), nullable=False, index=True), Column('key', String(255), nullable=False), Column('value', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), UniqueConstraint('image_id', 'key', **constr_kwargs), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) if meta.bind.name != 'ibm_db_sa': Index('ix_image_properties_image_id_key', image_properties.c.image_id, image_properties.c.key) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_image_properties_table(meta)] create_tables(tables) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_image_properties_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py0000664000567000056710000000457212701407047030543 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import (Column, MetaData, Table, Index) from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, String, Text, create_tables, drop_tables) # noqa def define_tasks_table(meta): tasks = Table('tasks', meta, Column('id', String(36), primary_key=True, nullable=False), Column('type', String(30), nullable=False), Column('status', String(30), nullable=False), Column('owner', String(255), nullable=False), Column('input', Text()), # json blob Column('result', Text()), # json blob Column('message', Text()), Column('expires_at', DateTime(), nullable=True), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) Index('ix_tasks_type', tasks.c.type) Index('ix_tasks_status', tasks.c.status) Index('ix_tasks_owner', tasks.c.owner) Index('ix_tasks_deleted', tasks.c.deleted) Index('ix_tasks_updated_at', tasks.c.updated_at) return tasks def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_tasks_table(meta)] create_tables(tables) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_tasks_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql0000664000567000056710000000321012701407047030602 0ustar jenkinsjenkins00000000000000-- Move type column from base images table -- to be records in image_properties table CREATE TEMPORARY TABLE tmp_type_records (id INTEGER NOT NULL, type VARCHAR(30) NOT NULL); INSERT INTO tmp_type_records SELECT id, type FROM images WHERE type IS NOT NULL; REPLACE INTO image_properties (image_id, key, value, created_at, deleted) SELECT id, 'type', type, date('now'), 0 FROM tmp_type_records; DROP TABLE tmp_type_records; -- Make changes to the base images table CREATE TEMPORARY TABLE images_backup ( id 
INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted FROM images; DROP TABLE images; CREATE TABLE images ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_deleted ON images (deleted); CREATE INDEX ix_images_is_public ON images (is_public); INSERT INTO images (id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted) SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted FROM images_backup; DROP TABLE images_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py0000664000567000056710000000377112701407047031465 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six import sqlalchemy from glance.db.sqlalchemy.migrate_repo import schema def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine images_table = sqlalchemy.Table('images', meta, autoload=True) image_locations_table = sqlalchemy.Table('image_locations', meta, autoload=True) # Create 'status' column for image_locations table status = sqlalchemy.Column('status', schema.String(30), server_default='active', nullable=False) status.create(image_locations_table) # Set 'status' column initial value for image_locations table mapping = {'active': 'active', 'pending_delete': 'pending_delete', 'deleted': 'deleted', 'killed': 'deleted'} for src, dst in six.iteritems(mapping): subq = sqlalchemy.sql.select([images_table.c.id]).where( images_table.c.status == src) image_locations_table.update(values={'status': dst}).where( image_locations_table.c.image_id.in_(subq)).execute() def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine image_locations_table = sqlalchemy.Table('image_locations', meta, autoload=True) # Remove 'status' column from image_locations table image_locations_table.columns['status'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/016_sqlite_downgrade.sql0000664000567000056710000000232312701407047031135 0ustar jenkinsjenkins00000000000000CREATE TEMPORARY TABLE image_members_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) ); INSERT INTO image_members_backup SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted FROM image_members; DROP TABLE image_members; CREATE TABLE image_members ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT 
NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) ); INSERT INTO image_members SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted FROM image_members_backup; DROP TABLE image_members_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py0000664000567000056710000000553712701407047027407 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, BigInteger, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ Returns the Table object for the images table that corresponds to the images table definition of this version. 
""" images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('disk_format', String(20)), Column('container_format', String(20)), Column('size', BigInteger()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), Column('checksum', String(32)), Column('owner', String(255)), mysql_engine='InnoDB', extend_existing=True) return images def get_image_properties_table(meta): """ No changes to the image properties table from 006... """ (get_image_properties_table,) = from_migration_import( '006_key_to_name', ['get_image_properties_table']) image_properties = get_image_properties_table(meta) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) owner = Column('owner', String(255)) owner.create(images) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) images.columns['owner'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py0000664000567000056710000000452512701407047032557 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from glance.db.sqlalchemy.migrate_repo import schema def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) # NOTE(bcwaldon): load the images table for the ForeignKey below sqlalchemy.Table('images', meta, autoload=True) image_locations_table = sqlalchemy.Table( 'image_locations', meta, sqlalchemy.Column('id', schema.Integer(), primary_key=True, nullable=False), sqlalchemy.Column('image_id', schema.String(36), sqlalchemy.ForeignKey('images.id'), nullable=False, index=True), sqlalchemy.Column('value', schema.Text(), nullable=False), sqlalchemy.Column('created_at', schema.DateTime(), nullable=False), sqlalchemy.Column('updated_at', schema.DateTime()), sqlalchemy.Column('deleted_at', schema.DateTime()), sqlalchemy.Column('deleted', schema.Boolean(), nullable=False, default=False, index=True), mysql_engine='InnoDB', mysql_charset='utf8', ) schema.create_tables([image_locations_table]) def downgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) image_locations_table = sqlalchemy.Table('image_locations', meta, autoload=True) schema.drop_tables([image_locations_table]) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py0000664000567000056710000000600012701407047033172 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from sqlalchemy import func from sqlalchemy import orm from sqlalchemy import sql from sqlalchemy import Table def upgrade(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) image_locations = Table('image_locations', meta, autoload=True) if migrate_engine.name == "ibm_db_sa": il = orm.aliased(image_locations) # NOTE(wenchma): Get all duplicated rows. qry = (sql.select([il.c.id]) .where(il.c.id > (sql.select([func.min(image_locations.c.id)]) .where(image_locations.c.image_id == il.c.image_id) .where(image_locations.c.value == il.c.value) .where(image_locations.c.meta_data == il.c.meta_data) .where(image_locations.c.deleted == False))) .where(il.c.deleted == False) .execute() ) for row in qry: stmt = (image_locations.delete() .where(image_locations.c.id == row[0]) .where(image_locations.c.deleted == False)) stmt.execute() else: session = orm.sessionmaker(bind=migrate_engine)() # NOTE(flaper87): Lets group by # image_id, location and metadata. grp = [image_locations.c.image_id, image_locations.c.value, image_locations.c.meta_data] # NOTE(flaper87): Get all duplicated rows qry = (session.query(*grp) .filter(image_locations.c.deleted == False) .group_by(*grp) .having(func.count() > 1)) for row in qry: # NOTE(flaper87): Not the fastest way to do it. # This is the best way to do it since sqlalchemy # has a bug around delete + limit. s = (sql.select([image_locations.c.id]) .where(image_locations.c.image_id == row[0]) .where(image_locations.c.value == row[1]) .where(image_locations.c.meta_data == row[2]) .where(image_locations.c.deleted == False) .limit(1).execute()) stmt = (image_locations.delete() .where(image_locations.c.id == s.first()[0])) stmt.execute() session.close() def downgrade(migrate_engine): # NOTE(flaper87): There's no downgrade # path for this. 
return glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql0000664000567000056710000000236712701407047030621 0ustar jenkinsjenkins00000000000000-- -- This is necessary because SQLite does not support -- RENAME INDEX or ALTER TABLE CHANGE COLUMN. -- CREATE TEMPORARY TABLE image_properties_backup ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO image_properties_backup SELECT id, image_id, key, value, created_at, updated_at, deleted_at, deleted FROM image_properties; DROP TABLE image_properties; CREATE TABLE image_properties ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), UNIQUE (image_id, name), FOREIGN KEY(image_id) REFERENCES images (id) ); CREATE INDEX ix_image_properties_name ON image_properties (name); CREATE INDEX ix_image_properties_deleted ON image_properties (deleted); INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted) SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted FROM image_properties_backup; DROP TABLE image_properties_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py0000664000567000056710000005123512701407047027561 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ While SQLAlchemy/sqlalchemy-migrate should abstract this correctly, there are known issues with these libraries so SQLite and non-SQLite migrations must be done separately. """ import uuid import migrate import sqlalchemy and_ = sqlalchemy.and_ or_ = sqlalchemy.or_ def upgrade(migrate_engine): """ Call the correct dialect-specific upgrade. """ meta = sqlalchemy.MetaData() meta.bind = migrate_engine t_images = _get_table('images', meta) t_image_members = _get_table('image_members', meta) t_image_properties = _get_table('image_properties', meta) dialect = migrate_engine.url.get_dialect().name if dialect == "sqlite": _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties) _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) elif dialect == "ibm_db_sa": _upgrade_db2(meta, t_images, t_image_members, t_image_properties) _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) _add_db2_constraints(meta) else: _upgrade_other(t_images, t_image_members, t_image_properties, dialect) def downgrade(migrate_engine): """ Call the correct dialect-specific downgrade. 
""" meta = sqlalchemy.MetaData() meta.bind = migrate_engine t_images = _get_table('images', meta) t_image_members = _get_table('image_members', meta) t_image_properties = _get_table('image_properties', meta) dialect = migrate_engine.url.get_dialect().name if dialect == "sqlite": _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties) _downgrade_sqlite(meta, t_images, t_image_members, t_image_properties) elif dialect == "ibm_db_sa": _remove_db2_constraints(meta) _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties) _downgrade_db2(meta, t_images, t_image_members, t_image_properties) else: _downgrade_other(t_images, t_image_members, t_image_properties, dialect) def _upgrade_sqlite(meta, t_images, t_image_members, t_image_properties): """ Upgrade 011 -> 012 with special SQLite-compatible logic. """ sql_commands = [ """CREATE TABLE images_backup ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) );""", """INSERT INTO images_backup SELECT * FROM images;""", """CREATE TABLE image_members_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) );""", """INSERT INTO image_members_backup SELECT * FROM image_members;""", """CREATE TABLE image_properties_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, name VARCHAR(255) NOT NULL, value 
TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), UNIQUE (image_id, name), FOREIGN KEY(image_id) REFERENCES images (id) );""", """INSERT INTO image_properties_backup SELECT * FROM image_properties;""", ] for command in sql_commands: meta.bind.execute(command) _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images) def _upgrade_db2(meta, t_images, t_image_members, t_image_properties): """ Upgrade for DB2. """ t_images.c.id.alter(sqlalchemy.String(36), primary_key=True) image_members_backup = sqlalchemy.Table( 'image_members_backup', meta, sqlalchemy.Column('id', sqlalchemy.Integer(), primary_key=True, nullable=False), sqlalchemy.Column('image_id', sqlalchemy.String(36), nullable=False, index=True), sqlalchemy.Column('member', sqlalchemy.String(255), nullable=False), sqlalchemy.Column('can_share', sqlalchemy.Boolean(), nullable=False, default=False), sqlalchemy.Column('created_at', sqlalchemy.DateTime(), nullable=False), sqlalchemy.Column('updated_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted', sqlalchemy.Boolean(), nullable=False, default=False, index=True), sqlalchemy.UniqueConstraint('image_id', 'member'), extend_existing=True) image_properties_backup = sqlalchemy.Table( 'image_properties_backup', meta, sqlalchemy.Column('id', sqlalchemy.Integer(), primary_key=True, nullable=False), sqlalchemy.Column('image_id', sqlalchemy.String(36), nullable=False, index=True), sqlalchemy.Column('name', sqlalchemy.String(255), nullable=False), sqlalchemy.Column('value', sqlalchemy.Text()), sqlalchemy.Column('created_at', sqlalchemy.DateTime(), nullable=False), sqlalchemy.Column('updated_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted', sqlalchemy.Boolean(), nullable=False, default=False, index=True), sqlalchemy.UniqueConstraint( 
'image_id', 'name', name='ix_image_properties_image_id_name'), extend_existing=True) image_members_backup.create() image_properties_backup.create() sql_commands = [ """INSERT INTO image_members_backup SELECT * FROM image_members;""", """INSERT INTO image_properties_backup SELECT * FROM image_properties;""", ] for command in sql_commands: meta.bind.execute(command) t_image_members.drop() t_image_properties.drop() image_members_backup.rename(name='image_members') image_properties_backup.rename(name='image_properties') def _add_db2_constraints(meta): # Create the foreign keys sql_commands = [ """ALTER TABLE image_members ADD CONSTRAINT member_image_id FOREIGN KEY (image_id) REFERENCES images (id);""", """ALTER TABLE image_properties ADD CONSTRAINT property_image_id FOREIGN KEY (image_id) REFERENCES images (id);""", ] for command in sql_commands: meta.bind.execute(command) def _remove_db2_constraints(meta): # Remove the foreign keys constraints sql_commands = [ """ALTER TABLE image_members DROP CONSTRAINT member_image_id;""", """ALTER TABLE image_properties DROP CONSTRAINT property_image_id;""" ] for command in sql_commands: meta.bind.execute(command) def _downgrade_db2(meta, t_images, t_image_members, t_image_properties): """ Downgrade for DB2. 
""" t_images.c.id.alter(sqlalchemy.Integer(), primary_key=True) image_members_old = sqlalchemy.Table( 'image_members_old', meta, sqlalchemy.Column('id', sqlalchemy.Integer(), primary_key=True, nullable=False), sqlalchemy.Column('image_id', sqlalchemy.Integer(), nullable=False, index=True), sqlalchemy.Column('member', sqlalchemy.String(255), nullable=False), sqlalchemy.Column('can_share', sqlalchemy.Boolean(), nullable=False, default=False), sqlalchemy.Column('created_at', sqlalchemy.DateTime(), nullable=False), sqlalchemy.Column('updated_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted', sqlalchemy.Boolean(), nullable=False, default=False, index=True), sqlalchemy.UniqueConstraint('image_id', 'member'), extend_existing=True) image_properties_old = sqlalchemy.Table( 'image_properties_old', meta, sqlalchemy.Column('id', sqlalchemy.Integer(), primary_key=True, nullable=False), sqlalchemy.Column('image_id', sqlalchemy.Integer(), nullable=False, index=True), sqlalchemy.Column('name', sqlalchemy.String(255), nullable=False), sqlalchemy.Column('value', sqlalchemy.Text()), sqlalchemy.Column('created_at', sqlalchemy.DateTime(), nullable=False), sqlalchemy.Column('updated_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted_at', sqlalchemy.DateTime()), sqlalchemy.Column('deleted', sqlalchemy.Boolean(), nullable=False, default=False, index=True), sqlalchemy.UniqueConstraint( 'image_id', 'name', name='ix_image_properties_image_id_name'), extend_existing=True) image_members_old.create() image_properties_old.create() sql_commands = [ """INSERT INTO image_members_old SELECT * FROM image_members;""", """INSERT INTO image_properties_old SELECT * FROM image_properties;""", ] for command in sql_commands: meta.bind.execute(command) t_image_members.drop() t_image_properties.drop() image_members_old.rename(name='image_members') image_properties_old.rename(name='image_properties') def _downgrade_sqlite(meta, t_images, 
t_image_members, t_image_properties): """ Downgrade 012 -> 011 with special SQLite-compatible logic. """ sql_commands = [ """CREATE TABLE images_backup ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) );""", """INSERT INTO images_backup SELECT * FROM images;""", """CREATE TABLE image_members_backup ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) );""", """INSERT INTO image_members_backup SELECT * FROM image_members;""", """CREATE TABLE image_properties_backup ( id INTEGER NOT NULL, image_id INTEGER NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), UNIQUE (image_id, name), FOREIGN KEY(image_id) REFERENCES images (id) );""", """INSERT INTO image_properties_backup SELECT * FROM image_properties;""", ] for command in sql_commands: meta.bind.execute(command) _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images) def _upgrade_other(t_images, t_image_members, t_image_properties, dialect): """ Upgrade 011 -> 012 with logic for non-SQLite databases. 
""" foreign_keys = _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect) for fk in foreign_keys: fk.drop() t_images.c.id.alter(sqlalchemy.String(36), primary_key=True) t_image_members.c.image_id.alter(sqlalchemy.String(36)) t_image_properties.c.image_id.alter(sqlalchemy.String(36)) _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties) for fk in foreign_keys: fk.create() def _downgrade_other(t_images, t_image_members, t_image_properties, dialect): """ Downgrade 012 -> 011 with logic for non-SQLite databases. """ foreign_keys = _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect) for fk in foreign_keys: fk.drop() _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties) t_images.c.id.alter(primary_key=True) # we have to use raw sql for postgresql as we have errors # if we use alter type on sqlalchemy if dialect == 'postgresql': t_images.bind.execute('''ALTER TABLE images ALTER COLUMN id TYPE INTEGER USING (id::INTEGER)''') t_images.bind.execute('''ALTER TABLE image_members ALTER COLUMN image_id TYPE INTEGER USING (image_id::INTEGER)''') t_images.bind.execute('''ALTER TABLE image_properties ALTER COLUMN image_id TYPE INTEGER USING (image_id::INTEGER)''') else: t_images.c.id.alter(sqlalchemy.Integer()) t_image_members.c.image_id.alter(sqlalchemy.Integer()) t_image_properties.c.image_id.alter(sqlalchemy.Integer()) for fk in foreign_keys: fk.create() def _sqlite_table_swap(meta, t_image_members, t_image_properties, t_images): t_image_members.drop() t_image_properties.drop() t_images.drop() meta.bind.execute("ALTER TABLE images_backup " "RENAME TO images") meta.bind.execute("ALTER TABLE image_members_backup " "RENAME TO image_members") meta.bind.execute("ALTER TABLE image_properties_backup " "RENAME TO image_properties") meta.bind.execute("""CREATE INDEX ix_image_properties_deleted ON image_properties (deleted);""") meta.bind.execute("""CREATE INDEX ix_image_properties_name ON image_properties 
(name);""") def _get_table(table_name, metadata): """Return a sqlalchemy Table definition with associated metadata.""" return sqlalchemy.Table(table_name, metadata, autoload=True) def _get_foreign_keys(t_images, t_image_members, t_image_properties, dialect): """Retrieve and return foreign keys for members/properties tables.""" foreign_keys = [] if t_image_members.foreign_keys: img_members_fk_name = list(t_image_members.foreign_keys)[0].name if dialect == 'mysql': fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], [t_images.c.id], name=img_members_fk_name) else: fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id], [t_images.c.id]) foreign_keys.append(fk1) if t_image_properties.foreign_keys: img_properties_fk_name = list(t_image_properties.foreign_keys)[0].name if dialect == 'mysql': fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], [t_images.c.id], name=img_properties_fk_name) else: fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id], [t_images.c.id]) foreign_keys.append(fk2) return foreign_keys def _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties): """Transition from INTEGER id to VARCHAR(36) id.""" images = list(t_images.select().execute()) for image in images: old_id = image["id"] new_id = str(uuid.uuid4()) t_images.update().where( t_images.c.id == old_id).values(id=new_id).execute() t_image_members.update().where( t_image_members.c.image_id == old_id).values( image_id=new_id).execute() t_image_properties.update().where( t_image_properties.c.image_id == old_id).values( image_id=new_id).execute() t_image_properties.update().where( and_(or_(t_image_properties.c.name == 'kernel_id', t_image_properties.c.name == 'ramdisk_id'), t_image_properties.c.value == old_id)).values( value=new_id).execute() def _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties): """Transition from VARCHAR(36) id to INTEGER id.""" images = list(t_images.select().execute()) new_id = 1 for image 
in images: old_id = image["id"] t_images.update().where( t_images.c.id == old_id).values( id=str(new_id)).execute() t_image_members.update().where( t_image_members.c.image_id == old_id).values( image_id=str(new_id)).execute() t_image_properties.update().where( t_image_properties.c.image_id == old_id).values( image_id=str(new_id)).execute() t_image_properties.update().where( and_(or_(t_image_properties.c.name == 'kernel_id', t_image_properties.c.name == 'ramdisk_id'), t_image_properties.c.value == old_id)).values( value=str(new_id)).execute() new_id += 1 glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py0000664000567000056710000000616312701407047031373 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy.schema import (Column, ForeignKey, MetaData, Table) from glance.db.sqlalchemy.migrate_repo.schema import (String, Text, create_tables, drop_tables) # noqa TASKS_MIGRATE_COLUMNS = ['input', 'message', 'result'] def define_task_info_table(meta): Table('tasks', meta, autoload=True) # NOTE(nikhil): input and result are stored as text in the DB. # SQLAlchemy marshals the data to/from JSON using custom type # JSONEncodedDict. It uses simplejson underneath. 
task_info = Table('task_info', meta, Column('task_id', String(36), ForeignKey('tasks.id'), primary_key=True, nullable=False), Column('input', Text()), Column('result', Text()), Column('message', Text()), mysql_engine='InnoDB', mysql_charset='utf8') return task_info def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_task_info_table(meta)] create_tables(tables) tasks_table = Table('tasks', meta, autoload=True) task_info_table = Table('task_info', meta, autoload=True) tasks = tasks_table.select().execute().fetchall() for task in tasks: values = { 'task_id': task.id, 'input': task.input, 'result': task.result, 'message': task.message, } task_info_table.insert(values=values).execute() for col_name in TASKS_MIGRATE_COLUMNS: tasks_table.columns[col_name].drop() def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tasks_table = Table('tasks', meta, autoload=True) task_info_table = Table('task_info', meta, autoload=True) for col_name in TASKS_MIGRATE_COLUMNS: column = Column(col_name, Text()) column.create(tasks_table) task_info_records = task_info_table.select().execute().fetchall() for task_info in task_info_records: values = { 'input': task_info.input, 'result': task_info.result, 'message': task_info.message } tasks_table.update(values=values).where( tasks_table.c.id == task_info.task_id).execute() drop_tables([task_info_table]) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py0000664000567000056710000000213112701407047033512 0ustar jenkinsjenkins00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Software, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine resource_types_table = Table('metadef_resource_types', meta, autoload=True) resource_types_table.update(values={'name': 'OS::Nova::Server'}).where( resource_types_table.c.name == 'OS::Nova::Instance').execute() def downgrade(migrate_engine): # NOTE(TravT): This is a bug fix (1537903). It shouldn't be downgraded. return ././@LongLink0000000000000000000000000000016400000000000011216 Lustar 00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.pyglance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef0000664000567000056710000005761612701407047035074 0ustar jenkinsjenkins00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import migrate import sqlalchemy from sqlalchemy import (func, Index, inspect, orm, String, Table, type_coerce) # The _upgrade...get_duplicate() def's are separate functions to # accommodate sqlite which locks the database against updates as long as # db_recs is active. # In addition, sqlite doesn't support the function 'concat' between # Strings and Integers, so, the updating of records is also adjusted. def _upgrade_metadef_namespaces_get_duplicates(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) session = orm.sessionmaker(bind=migrate_engine)() db_recs = (session.query(func.min(metadef_namespaces.c.id), metadef_namespaces.c.namespace) .group_by(metadef_namespaces.c.namespace) .having(func.count(metadef_namespaces.c.namespace) > 1)) dbrecs = [] for row in db_recs: dbrecs.append({'id': row[0], 'namespace': row[1]}) session.close() return dbrecs def _upgrade_metadef_objects_get_duplicates(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) metadef_objects = Table('metadef_objects', meta, autoload=True) session = orm.sessionmaker(bind=migrate_engine)() db_recs = (session.query(func.min(metadef_objects.c.id), metadef_objects.c.namespace_id, metadef_objects.c.name) .group_by(metadef_objects.c.namespace_id, metadef_objects.c.name) .having(func.count() > 1)) dbrecs = [] for row in db_recs: dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) session.close() return dbrecs def _upgrade_metadef_properties_get_duplicates(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) metadef_properties = Table('metadef_properties', meta, autoload=True) session = orm.sessionmaker(bind=migrate_engine)() db_recs = (session.query(func.min(metadef_properties.c.id), metadef_properties.c.namespace_id, metadef_properties.c.name) .group_by(metadef_properties.c.namespace_id, metadef_properties.c.name) .having(func.count() > 1)) dbrecs = [] for row in db_recs: 
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) session.close() return dbrecs def _upgrade_metadef_tags_get_duplicates(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) metadef_tags = Table('metadef_tags', meta, autoload=True) session = orm.sessionmaker(bind=migrate_engine)() db_recs = (session.query(func.min(metadef_tags.c.id), metadef_tags.c.namespace_id, metadef_tags.c.name) .group_by(metadef_tags.c.namespace_id, metadef_tags.c.name) .having(func.count() > 1)) dbrecs = [] for row in db_recs: dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]}) session.close() return dbrecs def _upgrade_metadef_resource_types_get_duplicates(migrate_engine): meta = sqlalchemy.schema.MetaData(migrate_engine) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) session = orm.sessionmaker(bind=migrate_engine)() db_recs = (session.query(func.min(metadef_resource_types.c.id), metadef_resource_types.c.name) .group_by(metadef_resource_types.c.name) .having(func.count(metadef_resource_types.c.name) > 1)) dbrecs = [] for row in db_recs: dbrecs.append({'id': row[0], 'name': row[1]}) session.close() return dbrecs def _upgrade_data(migrate_engine): # Rename duplicates to be unique. 
meta = sqlalchemy.schema.MetaData(migrate_engine) # ORM tables metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_tags = Table('metadef_tags', meta, autoload=True) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) # Fix duplicate metadef_namespaces # Update the non-first record(s) with an unique namespace value dbrecs = _upgrade_metadef_namespaces_get_duplicates(migrate_engine) for row in dbrecs: s = (metadef_namespaces.update() .where(metadef_namespaces.c.id > row['id']) .where(metadef_namespaces.c.namespace == row['namespace']) ) if migrate_engine.name == 'sqlite': s = (s.values(namespace=(row['namespace'] + '-DUPL-' + type_coerce(metadef_namespaces.c.id, String)), display_name=(row['namespace'] + '-DUPL-' + type_coerce(metadef_namespaces.c.id, String)))) else: s = s.values(namespace=func.concat(row['namespace'], '-DUPL-', metadef_namespaces.c.id), display_name=func.concat(row['namespace'], '-DUPL-', metadef_namespaces.c.id)) s.execute() # Fix duplicate metadef_objects dbrecs = _upgrade_metadef_objects_get_duplicates(migrate_engine) for row in dbrecs: s = (metadef_objects.update() .where(metadef_objects.c.id > row['id']) .where(metadef_objects.c.namespace_id == row['namespace_id']) .where(metadef_objects.c.name == str(row['name'])) ) if migrate_engine.name == 'sqlite': s = (s.values(name=(row['name'] + '-DUPL-' + type_coerce(metadef_objects.c.id, String)))) else: s = s.values(name=func.concat(row['name'], '-DUPL-', metadef_objects.c.id)) s.execute() # Fix duplicate metadef_properties dbrecs = _upgrade_metadef_properties_get_duplicates(migrate_engine) for row in dbrecs: s = (metadef_properties.update() .where(metadef_properties.c.id > row['id']) .where(metadef_properties.c.namespace_id == row['namespace_id']) .where(metadef_properties.c.name == str(row['name'])) ) if 
migrate_engine.name == 'sqlite': s = (s.values(name=(row['name'] + '-DUPL-' + type_coerce(metadef_properties.c.id, String))) ) else: s = s.values(name=func.concat(row['name'], '-DUPL-', metadef_properties.c.id)) s.execute() # Fix duplicate metadef_tags dbrecs = _upgrade_metadef_tags_get_duplicates(migrate_engine) for row in dbrecs: s = (metadef_tags.update() .where(metadef_tags.c.id > row['id']) .where(metadef_tags.c.namespace_id == row['namespace_id']) .where(metadef_tags.c.name == str(row['name'])) ) if migrate_engine.name == 'sqlite': s = (s.values(name=(row['name'] + '-DUPL-' + type_coerce(metadef_tags.c.id, String))) ) else: s = s.values(name=func.concat(row['name'], '-DUPL-', metadef_tags.c.id)) s.execute() # Fix duplicate metadef_resource_types dbrecs = _upgrade_metadef_resource_types_get_duplicates(migrate_engine) for row in dbrecs: s = (metadef_resource_types.update() .where(metadef_resource_types.c.id > row['id']) .where(metadef_resource_types.c.name == str(row['name'])) ) if migrate_engine.name == 'sqlite': s = (s.values(name=(row['name'] + '-DUPL-' + type_coerce(metadef_resource_types.c.id, String))) ) else: s = s.values(name=func.concat(row['name'], '-DUPL-', metadef_resource_types.c.id)) s.execute() def _update_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces, new_constraint_name, new_fk_name): migrate.UniqueConstraint( metadef.c.namespace_id, metadef.c.name).drop() migrate.UniqueConstraint( metadef.c.namespace_id, metadef.c.name, name=new_constraint_name).create() migrate.ForeignKeyConstraint( [metadef.c.namespace_id], [metadef_namespaces.c.id], name=new_fk_name).create() def _downgrade_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces, constraint_name, fk_name): migrate.UniqueConstraint( metadef.c.namespace_id, metadef.c.name, name=constraint_name).drop() migrate.UniqueConstraint( metadef.c.namespace_id, metadef.c.name).create() migrate.ForeignKeyConstraint( [metadef.c.namespace_id], [metadef_namespaces.c.id], 
name=fk_name).drop() migrate.ForeignKeyConstraint( [metadef.c.namespace_id], [metadef_namespaces.c.id]).create() def _drop_unique_constraint_if_exists(inspector, table_name, metadef): name = _get_unique_constraint_name(inspector, table_name, ['namespace_id', 'name']) if name: migrate.UniqueConstraint(metadef.c.namespace_id, metadef.c.name, name=name).drop() def _drop_index_with_fk_constraint(metadef, metadef_namespaces, index_name, fk_old_name, fk_new_name): fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], [metadef_namespaces.c.id], name=fk_old_name) fkc.drop() if index_name: Index(index_name, metadef.c.namespace_id).drop() # Rename the fk for consistency across all db's fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], [metadef_namespaces.c.id], name=fk_new_name) fkc.create() def _downgrade_constraint_with_fk(metadef, metadef_namespaces, constraint_name, fk_curr_name, fk_next_name): fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], [metadef_namespaces.c.id], name=fk_curr_name) fkc.drop() migrate.UniqueConstraint(metadef.c.namespace_id, metadef.c.name, name=constraint_name).drop() fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id], [metadef_namespaces.c.id], name=fk_next_name) fkc.create() def _get_unique_constraint_name(inspector, table_name, columns): constraints = inspector.get_unique_constraints(table_name) for constraint in constraints: if set(constraint['column_names']) == set(columns): return constraint['name'] return None def _get_fk_constraint_name(inspector, table_name, columns): constraints = inspector.get_foreign_keys(table_name) for constraint in constraints: if set(constraint['constrained_columns']) == set(columns): return constraint['name'] return None def upgrade(migrate_engine): _upgrade_data(migrate_engine) meta = sqlalchemy.MetaData() meta.bind = migrate_engine inspector = inspect(migrate_engine) # ORM tables metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) metadef_objects = 
Table('metadef_objects', meta, autoload=True) metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_tags = Table('metadef_tags', meta, autoload=True) metadef_ns_res_types = Table('metadef_namespace_resource_types', meta, autoload=True) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) # Drop the bad, non-unique indices. if migrate_engine.name == 'sqlite': # For sqlite: # Only after the unique constraints have been added should the indices # be dropped. If done the other way, sqlite complains during # constraint adding/dropping that the index does/does not exist. # Note: The _get_unique_constraint_name, _get_fk_constraint_name # return None for constraints that do in fact exist. Also, # get_index_names returns names, but, the names can not be used with # the Index(name, blah).drop() command, so, putting sqlite into # it's own section. # Objects _update_sqlite_namespace_id_name_constraint( metadef_objects, metadef_namespaces, 'uq_metadef_objects_namespace_id_name', 'metadef_objects_fk_1') # Properties _update_sqlite_namespace_id_name_constraint( metadef_properties, metadef_namespaces, 'uq_metadef_properties_namespace_id_name', 'metadef_properties_fk_1') # Tags _update_sqlite_namespace_id_name_constraint( metadef_tags, metadef_namespaces, 'uq_metadef_tags_namespace_id_name', 'metadef_tags_fk_1') # Namespaces migrate.UniqueConstraint( metadef_namespaces.c.namespace).drop() migrate.UniqueConstraint( metadef_namespaces.c.namespace, name='uq_metadef_namespaces_namespace').create() # ResourceTypes migrate.UniqueConstraint( metadef_resource_types.c.name).drop() migrate.UniqueConstraint( metadef_resource_types.c.name, name='uq_metadef_resource_types_name').create() # Now drop the bad indices Index('ix_metadef_objects_namespace_id', metadef_objects.c.namespace_id, metadef_objects.c.name).drop() Index('ix_metadef_properties_namespace_id', metadef_properties.c.namespace_id, metadef_properties.c.name).drop() 
Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id, metadef_tags.c.name).drop() else: # First drop the bad non-unique indices. # To do that (for mysql), must first drop foreign key constraints # BY NAME and then drop the bad indices. # Finally, re-create the foreign key constraints with a consistent # name. # DB2 still has unique constraints, but, they are badly named. # Drop them, they will be recreated at the final step. name = _get_unique_constraint_name(inspector, 'metadef_namespaces', ['namespace']) if name: migrate.UniqueConstraint(metadef_namespaces.c.namespace, name=name).drop() _drop_unique_constraint_if_exists(inspector, 'metadef_objects', metadef_objects) _drop_unique_constraint_if_exists(inspector, 'metadef_properties', metadef_properties) _drop_unique_constraint_if_exists(inspector, 'metadef_tags', metadef_tags) name = _get_unique_constraint_name(inspector, 'metadef_resource_types', ['name']) if name: migrate.UniqueConstraint(metadef_resource_types.c.name, name=name).drop() # Objects _drop_index_with_fk_constraint( metadef_objects, metadef_namespaces, 'ix_metadef_objects_namespace_id', _get_fk_constraint_name( inspector, 'metadef_objects', ['namespace_id']), 'metadef_objects_fk_1') # Properties _drop_index_with_fk_constraint( metadef_properties, metadef_namespaces, 'ix_metadef_properties_namespace_id', _get_fk_constraint_name( inspector, 'metadef_properties', ['namespace_id']), 'metadef_properties_fk_1') # Tags _drop_index_with_fk_constraint( metadef_tags, metadef_namespaces, 'ix_metadef_tags_namespace_id', _get_fk_constraint_name( inspector, 'metadef_tags', ['namespace_id']), 'metadef_tags_fk_1') # Drop Others without fk constraints. Index('ix_metadef_namespaces_namespace', metadef_namespaces.c.namespace).drop() # The next two don't exist in ibm_db_sa, but, drop them everywhere else. 
if migrate_engine.name != 'ibm_db_sa': Index('ix_metadef_resource_types_name', metadef_resource_types.c.name).drop() # Not needed due to primary key on same columns Index('ix_metadef_ns_res_types_res_type_id_ns_id', metadef_ns_res_types.c.resource_type_id, metadef_ns_res_types.c.namespace_id).drop() # Now, add back the dropped indexes as unique constraints if migrate_engine.name != 'sqlite': # Namespaces migrate.UniqueConstraint( metadef_namespaces.c.namespace, name='uq_metadef_namespaces_namespace').create() # Objects migrate.UniqueConstraint( metadef_objects.c.namespace_id, metadef_objects.c.name, name='uq_metadef_objects_namespace_id_name').create() # Properties migrate.UniqueConstraint( metadef_properties.c.namespace_id, metadef_properties.c.name, name='uq_metadef_properties_namespace_id_name').create() # Tags migrate.UniqueConstraint( metadef_tags.c.namespace_id, metadef_tags.c.name, name='uq_metadef_tags_namespace_id_name').create() # Resource Types migrate.UniqueConstraint( metadef_resource_types.c.name, name='uq_metadef_resource_types_name').create() def downgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine # ORM tables metadef_namespaces = Table('metadef_namespaces', meta, autoload=True) metadef_objects = Table('metadef_objects', meta, autoload=True) metadef_properties = Table('metadef_properties', meta, autoload=True) metadef_tags = Table('metadef_tags', meta, autoload=True) metadef_resource_types = Table('metadef_resource_types', meta, autoload=True) metadef_ns_res_types = Table('metadef_namespace_resource_types', meta, autoload=True) # Drop the unique constraints if migrate_engine.name == 'sqlite': # Objects _downgrade_sqlite_namespace_id_name_constraint( metadef_objects, metadef_namespaces, 'uq_metadef_objects_namespace_id_name', 'metadef_objects_fk_1') # Properties _downgrade_sqlite_namespace_id_name_constraint( metadef_properties, metadef_namespaces, 'uq_metadef_properties_namespace_id_name', 'metadef_properties_fk_1') # 
Tags _downgrade_sqlite_namespace_id_name_constraint( metadef_tags, metadef_namespaces, 'uq_metadef_tags_namespace_id_name', 'metadef_tags_fk_1') # Namespaces migrate.UniqueConstraint( metadef_namespaces.c.namespace, name='uq_metadef_namespaces_namespace').drop() migrate.UniqueConstraint( metadef_namespaces.c.namespace).create() # ResourceTypes migrate.UniqueConstraint( metadef_resource_types.c.name, name='uq_metadef_resource_types_name').drop() migrate.UniqueConstraint( metadef_resource_types.c.name).create() else: # For mysql, must drop foreign key constraints before dropping the # unique constraint. So drop the fkc, then drop the constraints, # then recreate the fkc. # Objects _downgrade_constraint_with_fk( metadef_objects, metadef_namespaces, 'uq_metadef_objects_namespace_id_name', 'metadef_objects_fk_1', None) # Properties _downgrade_constraint_with_fk( metadef_properties, metadef_namespaces, 'uq_metadef_properties_namespace_id_name', 'metadef_properties_fk_1', None) # Tags _downgrade_constraint_with_fk( metadef_tags, metadef_namespaces, 'uq_metadef_tags_namespace_id_name', 'metadef_tags_fk_1', 'metadef_tags_namespace_id_fkey') # Namespaces migrate.UniqueConstraint( metadef_namespaces.c.namespace, name='uq_metadef_namespaces_namespace').drop() # Resource_types migrate.UniqueConstraint( metadef_resource_types.c.name, name='uq_metadef_resource_types_name').drop() # Create dropped unique constraints as bad, non-unique indexes Index('ix_metadef_objects_namespace_id', metadef_objects.c.namespace_id).create() Index('ix_metadef_properties_namespace_id', metadef_properties.c.namespace_id).create() # These need to be done before the metadef_tags and metadef_namespaces # unique constraints are created to avoid 'tuple out of range' errors # in db2. 
Index('ix_metadef_tags_namespace_id', metadef_tags.c.namespace_id, metadef_tags.c.name).create() Index('ix_metadef_namespaces_namespace', metadef_namespaces.c.namespace).create() # Create these everywhere, except for db2 if migrate_engine.name != 'ibm_db_sa': Index('ix_metadef_resource_types_name', metadef_resource_types.c.name).create() Index('ix_metadef_ns_res_types_res_type_id_ns_id', metadef_ns_res_types.c.resource_type_id, metadef_ns_res_types.c.namespace_id).create() else: # Recreate the badly named unique constraints in db2 migrate.UniqueConstraint( metadef_namespaces.c.namespace, name='ix_namespaces_namespace').create() migrate.UniqueConstraint( metadef_objects.c.namespace_id, metadef_objects.c.name, name='ix_objects_namespace_id_name').create() migrate.UniqueConstraint( metadef_properties.c.namespace_id, metadef_properties.c.name, name='ix_metadef_properties_namespace_id_name').create() migrate.UniqueConstraint( metadef_tags.c.namespace_id, metadef_tags.c.name).create() migrate.UniqueConstraint( metadef_resource_types.c.name, name='ix_metadef_resource_types_name').create() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py0000664000567000056710000001207512701407047033472 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy from sqlalchemy import Table, Index, UniqueConstraint, Sequence from sqlalchemy.schema import (AddConstraint, DropConstraint, CreateIndex, ForeignKeyConstraint) from sqlalchemy import sql from sqlalchemy import update def upgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine if migrate_engine.name not in ['mysql', 'postgresql']: return image_properties = Table('image_properties', meta, autoload=True) image_members = Table('image_members', meta, autoload=True) images = Table('images', meta, autoload=True) # We have to ensure that we doesn't have `nulls` values since we are going # to set nullable=False migrate_engine.execute( update(image_members) .where(image_members.c.status == sql.expression.null()) .values(status='pending')) migrate_engine.execute( update(images) .where(images.c.protected == sql.expression.null()) .values(protected=sql.expression.false())) image_members.c.status.alter(nullable=False, server_default='pending') images.c.protected.alter( nullable=False, server_default=sql.expression.false()) if migrate_engine.name == 'postgresql': Index('ix_image_properties_image_id_name', image_properties.c.image_id, image_properties.c.name).drop() # We have different names of this constraint in different versions of # postgresql. Since we have only one constraint on this table, we can # get it in the following way. 
name = migrate_engine.execute( """SELECT conname FROM pg_constraint WHERE conrelid = (SELECT oid FROM pg_class WHERE relname LIKE 'image_properties') AND contype = 'u';""").scalar() constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name, name='%s' % name) migrate_engine.execute(DropConstraint(constraint)) constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name, name='ix_image_properties_image_id_name') migrate_engine.execute(AddConstraint(constraint)) images.c.id.alter(server_default=None) if migrate_engine.name == 'mysql': constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name, name='image_id') migrate_engine.execute(DropConstraint(constraint)) image_locations = Table('image_locations', meta, autoload=True) if len(image_locations.foreign_keys) == 0: migrate_engine.execute(AddConstraint(ForeignKeyConstraint( [image_locations.c.image_id], [images.c.id]))) def downgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine if migrate_engine.name not in ['mysql', 'postgresql']: return image_properties = Table('image_properties', meta, autoload=True) image_members = Table('image_members', meta, autoload=True) images = Table('images', meta, autoload=True) if migrate_engine.name == 'postgresql': constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name, name='ix_image_properties_image_id_name') migrate_engine.execute(DropConstraint(constraint)) constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name) migrate_engine.execute(AddConstraint(constraint)) index = Index('ix_image_properties_image_id_name', image_properties.c.image_id, image_properties.c.name) migrate_engine.execute(CreateIndex(index)) images.c.id.alter(server_default=Sequence('images_id_seq') .next_value()) if migrate_engine.name == 'mysql': constraint = UniqueConstraint(image_properties.c.image_id, image_properties.c.name, name='image_id') 
migrate_engine.execute(AddConstraint(constraint)) image_members.c.status.alter(nullable=True, server_default=None) images.c.protected.alter(nullable=True, server_default=None) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql0000664000567000056710000000320012701407047030600 0ustar jenkinsjenkins00000000000000CREATE TEMPORARY TABLE images_backup ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER, min_ram INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images; DROP TABLE images; CREATE TABLE images ( id INTEGER NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, location TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_deleted ON images (deleted); CREATE INDEX ix_images_is_public ON images (is_public); INSERT INTO images SELECT id, name, size, status, is_public, location, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram FROM images_backup; DROP TABLE images_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql0000664000567000056710000000054512701407047030461 0ustar 
jenkinsjenkins00000000000000-- -- This file is necessary because MySQL does not support -- renaming indexes. -- DROP INDEX ix_image_properties_image_id_key ON image_properties; -- Rename the `key` column to `name` ALTER TABLE image_properties CHANGE COLUMN `key` name VARCHAR(255) NOT NULL; CREATE UNIQUE INDEX ix_image_properties_image_id_name ON image_properties (image_id, name); glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py0000664000567000056710000000165312701407047035007 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from sqlalchemy import (Table, Index) def upgrade(migrate_engine): if migrate_engine.name == 'mysql': meta = sqlalchemy.MetaData() meta.bind = migrate_engine metadef_tags = Table('metadef_tags', meta, autoload=True) Index('namespace_id', metadef_tags.c.namespace_id, metadef_tags.c.name).drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py0000664000567000056710000001164512701407047027733 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ No changes to the image properties table from 002... """ (get_images_table,) = from_migration_import( '004_add_checksum', ['get_images_table']) images = get_images_table(meta) return images def get_image_properties_table(meta): """ Returns the Table object for the image_properties table that corresponds to the image_properties table definition of this version. """ (get_images_table,) = from_migration_import( '004_add_checksum', ['get_images_table']) images = get_images_table(meta) # noqa image_properties = Table('image_properties', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('image_id', Integer(), ForeignKey('images.id'), nullable=False, index=True), Column('name', String(255), nullable=False), Column('value', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), UniqueConstraint('image_id', 'name'), mysql_engine='InnoDB', extend_existing=True) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine (get_image_properties_table,) = from_migration_import( '004_add_checksum', ['get_image_properties_table']) image_properties = get_image_properties_table(meta) if migrate_engine.name == "ibm_db_sa": # NOTE(dperaza) ibm db2 does not 
allow ALTER INDEX so we will drop # the index, rename the column, then re-create the index sql_commands = [ """ALTER TABLE image_properties DROP UNIQUE ix_image_properties_image_id_key;""", """ALTER TABLE image_properties RENAME COLUMN \"key\" to name;""", """ALTER TABLE image_properties ADD CONSTRAINT ix_image_properties_image_id_name UNIQUE(image_id, name);""", ] for command in sql_commands: meta.bind.execute(command) else: index = Index('ix_image_properties_image_id_key', image_properties.c.image_id, image_properties.c.key) index.rename('ix_image_properties_image_id_name') image_properties = get_image_properties_table(meta) image_properties.columns['key'].alter(name="name") def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine image_properties = get_image_properties_table(meta) if migrate_engine.name == "ibm_db_sa": # NOTE(dperaza) ibm db2 does not allow ALTER INDEX so we will drop # the index, rename the column, then re-create the index sql_commands = [ """ALTER TABLE image_properties DROP UNIQUE ix_image_properties_image_id_name;""", """ALTER TABLE image_properties RENAME COLUMN name to \"key\";""", """ALTER TABLE image_properties ADD CONSTRAINT ix_image_properties_image_id_key UNIQUE(image_id, \"key\");""", ] for command in sql_commands: meta.bind.execute(command) else: index = Index('ix_image_properties_image_id_name', image_properties.c.image_id, image_properties.c.name) index.rename('ix_image_properties_image_id_key') image_properties.columns['name'].alter(name="key") glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/006_mysql_downgrade.sql0000664000567000056710000000054612701407047031005 0ustar jenkinsjenkins00000000000000-- -- This file is necessary because MySQL does not support -- renaming indexes. 
-- DROP INDEX ix_image_properties_image_id_name ON image_properties; -- Rename the `key` column to `name` ALTER TABLE image_properties CHANGE COLUMN name `key` VARCHAR(255) NOT NULL; CREATE UNIQUE INDEX ix_image_properties_image_id_key ON image_properties (image_id, `key`); glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py0000664000567000056710000000205312701407047030231 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData, Table, Column, Boolean meta = MetaData() protected = Column('protected', Boolean, default=False) def upgrade(migrate_engine): meta.bind = migrate_engine images = Table('images', meta, autoload=True) images.create_column(protected) def downgrade(migrate_engine): meta.bind = migrate_engine images = Table('images', meta, autoload=True) images.drop_column(protected) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py0000664000567000056710000000457012701407047031105 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import from_migration_import def get_images_table(meta): """ No changes to the images table from 008... """ (get_images_table,) = from_migration_import( '008_add_image_members_table', ['get_images_table']) images = get_images_table(meta) return images def get_image_properties_table(meta): """ No changes to the image properties table from 008... """ (get_image_properties_table,) = from_migration_import( '008_add_image_members_table', ['get_image_properties_table']) image_properties = get_image_properties_table(meta) return image_properties def get_image_members_table(meta): """ No changes to the image members table from 008... 
""" (get_image_members_table,) = from_migration_import( '008_add_image_members_table', ['get_image_members_table']) images = get_image_members_table(meta) return images def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images_table = get_images_table(meta) # set updated_at to created_at if equal to None conn = migrate_engine.connect() conn.execute( images_table.update( images_table.c.updated_at == None, {images_table.c.updated_at: images_table.c.created_at})) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images_table = get_images_table(meta) # set updated_at to None if equal to created_at conn = migrate_engine.connect() conn.execute( images_table.update( images_table.c.updated_at == images_table.c.created_at, {images_table.c.updated_at: None})) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql0000664000567000056710000001133212701407047030615 0ustar jenkinsjenkins00000000000000UPDATE images SET protected = 0 WHERE protected is NULL; UPDATE image_members SET status = 'pending' WHERE status is NULL; CREATE TEMPORARY TABLE images_backup ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER, min_ram INTEGER, protected BOOLEAN NOT NULL DEFAULT 0, virtual_size INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); INSERT INTO images_backup SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size FROM images; DROP TABLE images; CREATE TABLE images ( id VARCHAR(36) NOT NULL, name VARCHAR(255), size INTEGER, status VARCHAR(30) NOT NULL, is_public BOOLEAN NOT NULL, created_at 
DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, disk_format VARCHAR(20), container_format VARCHAR(20), checksum VARCHAR(32), owner VARCHAR(255), min_disk INTEGER NOT NULL, min_ram INTEGER NOT NULL, protected BOOLEAN NOT NULL DEFAULT 0, virtual_size INTEGER, PRIMARY KEY (id), CHECK (is_public IN (0, 1)), CHECK (deleted IN (0, 1)) ); CREATE INDEX ix_images_deleted ON images (deleted); CREATE INDEX ix_images_is_public ON images (is_public); CREATE INDEX owner_image_idx ON images (owner); CREATE INDEX checksum_image_idx ON images (checksum); INSERT INTO images SELECT id, name, size, status, is_public, created_at, updated_at, deleted_at, deleted, disk_format, container_format, checksum, owner, min_disk, min_ram, protected, virtual_size FROM images_backup; DROP TABLE images_backup; CREATE TEMPORARY TABLE image_members_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, status VARCHAR(20) NOT NULL DEFAULT 'pending', PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id) ); INSERT INTO image_members_backup SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status FROM image_members; DROP TABLE image_members; CREATE TABLE image_members ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, member VARCHAR(255) NOT NULL, can_share BOOLEAN NOT NULL, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, status VARCHAR(20) NOT NULL DEFAULT 'pending', PRIMARY KEY (id), UNIQUE (image_id, member), CHECK (can_share IN (0, 1)), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id), CONSTRAINT image_members_image_id_member_deleted_at_key UNIQUE (image_id, member, deleted_at) ); CREATE INDEX 
ix_image_members_deleted ON image_members (deleted); CREATE INDEX ix_image_members_image_id ON image_members (image_id); CREATE INDEX ix_image_members_image_id_member ON image_members (image_id, member); INSERT INTO image_members SELECT id, image_id, member, can_share, created_at, updated_at, deleted_at, deleted, status FROM image_members_backup; DROP TABLE image_members_backup; CREATE TEMPORARY TABLE image_properties_backup ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id) ); INSERT INTO image_properties_backup SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted FROM image_properties; DROP TABLE image_properties; CREATE TABLE image_properties ( id INTEGER NOT NULL, image_id VARCHAR(36) NOT NULL, name VARCHAR(255) NOT NULL, value TEXT, created_at DATETIME NOT NULL, updated_at DATETIME, deleted_at DATETIME, deleted BOOLEAN NOT NULL, PRIMARY KEY (id), CHECK (deleted IN (0, 1)), FOREIGN KEY(image_id) REFERENCES images (id), CONSTRAINT ix_image_properties_image_id_name UNIQUE (image_id, name) ); CREATE INDEX ix_image_properties_deleted ON image_properties (deleted); CREATE INDEX ix_image_properties_image_id ON image_properties (image_id); INSERT INTO image_properties (id, image_id, name, value, created_at, updated_at, deleted_at, deleted) SELECT id, image_id, name, value, created_at, updated_at, deleted_at, deleted FROM image_properties_backup; DROP TABLE image_properties_backup; glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py0000664000567000056710000000614612701407047032077 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from migrate.changeset import * # noqa from sqlalchemy import * # noqa from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, from_migration_import) # noqa def get_images_table(meta): """ Returns the Table object for the images table that corresponds to the images table definition of this version. """ images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('disk_format', String(20)), Column('container_format', String(20)), Column('size', Integer()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), Column('checksum', String(32)), Column('owner', String(255)), Column('min_disk', Integer(), default=0), Column('min_ram', Integer(), default=0), mysql_engine='InnoDB', extend_existing=True) return images def get_image_properties_table(meta): """ No changes to the image properties table from 008... 
""" (define_image_properties_table,) = from_migration_import( '008_add_image_members_table', ['define_image_properties_table']) image_properties = define_image_properties_table(meta) return image_properties def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) min_disk = Column('min_disk', Integer(), default=0) min_disk.create(images) min_ram = Column('min_ram', Integer(), default=0) min_ram.create(images) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine images = get_images_table(meta) images.columns['min_disk'].drop() images.columns['min_ram'].drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py0000664000567000056710000000575112701407047031520 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy import schema from glance.db.sqlalchemy.migrate_repo import schema as glance_schema def define_image_tags_table(meta): # Load the images table so the foreign key can be set up properly schema.Table('images', meta, autoload=True) image_tags = schema.Table('image_tags', meta, schema.Column('id', glance_schema.Integer(), primary_key=True, nullable=False), schema.Column('image_id', glance_schema.String(36), schema.ForeignKey('images.id'), nullable=False), schema.Column('value', glance_schema.String(255), nullable=False), schema.Column('created_at', glance_schema.DateTime(), nullable=False), schema.Column('updated_at', glance_schema.DateTime()), schema.Column('deleted_at', glance_schema.DateTime()), schema.Column('deleted', glance_schema.Boolean(), nullable=False, default=False), mysql_engine='InnoDB', mysql_charset='utf8') schema.Index('ix_image_tags_image_id', image_tags.c.image_id) schema.Index('ix_image_tags_image_id_tag_value', image_tags.c.image_id, image_tags.c.value) return image_tags def upgrade(migrate_engine): meta = schema.MetaData() meta.bind = migrate_engine tables = [define_image_tags_table(meta)] glance_schema.create_tables(tables) def downgrade(migrate_engine): meta = schema.MetaData() meta.bind = migrate_engine tables = [define_image_tags_table(meta)] glance_schema.drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py0000664000567000056710000000225312701407047032327 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import MetaData tables = ['image_locations'] def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine if migrate_engine.name == "mysql": d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';") for row in d.fetchall(): table_name = row[0] if table_name in tables: migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table_name) def downgrade(migrate_engine): pass glance-12.0.0/glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py0000664000567000056710000000432612701407047030656 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.schema import (Column, MetaData, Table) from glance.db.sqlalchemy.migrate_repo.schema import ( Boolean, DateTime, Integer, String, Text, create_tables, drop_tables) # noqa def define_images_table(meta): images = Table('images', meta, Column('id', Integer(), primary_key=True, nullable=False), Column('name', String(255)), Column('type', String(30)), Column('size', Integer()), Column('status', String(30), nullable=False), Column('is_public', Boolean(), nullable=False, default=False, index=True), Column('location', Text()), Column('created_at', DateTime(), nullable=False), Column('updated_at', DateTime()), Column('deleted_at', DateTime()), Column('deleted', Boolean(), nullable=False, default=False, index=True), mysql_engine='InnoDB', mysql_charset='utf8', extend_existing=True) return images def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_images_table(meta)] create_tables(tables) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine tables = [define_images_table(meta)] drop_tables(tables) glance-12.0.0/glance/db/sqlalchemy/migrate_repo/manage.py0000664000567000056710000000141212701407047024423 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from migrate.versioning.shell import main # This should probably be a console script entry point. 
if __name__ == '__main__': main(debug='False', repository='.') glance-12.0.0/glance/db/sqlalchemy/migrate_repo/schema.py0000664000567000056710000000626212701407051024436 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Various conveniences used for migration scripts """ from oslo_log import log as logging import sqlalchemy.types from glance.i18n import _LI LOG = logging.getLogger(__name__) String = lambda length: sqlalchemy.types.String( length=length, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False) Text = lambda: sqlalchemy.types.Text( length=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False) Boolean = lambda: sqlalchemy.types.Boolean(create_constraint=True, name=None) DateTime = lambda: sqlalchemy.types.DateTime(timezone=False) Integer = lambda: sqlalchemy.types.Integer() BigInteger = lambda: sqlalchemy.types.BigInteger() PickleType = lambda: sqlalchemy.types.PickleType() Numeric = lambda: sqlalchemy.types.Numeric() def from_migration_import(module_name, fromlist): """ Import a migration file and return the module :param module_name: name of migration module to import from (ex: 001_add_images_table) :param fromlist: list of items to import (ex: define_images_table) :retval: module object This bit of ugliness warrants an explanation: As you're writing migrations, you'll frequently want to refer to tables defined in previous migrations. 
In the interest of not repeating yourself, you need a way of importing that table into a 'future' migration. However, tables are bound to metadata, so what you need to import is really a table factory, which you can late-bind to your current metadata object. Moreover, migrations begin with a number (001...), which means they aren't valid Python identifiers. This means we can't perform a 'normal' import on them (the Python lexer will 'splode). Instead, we need to use __import__ magic to bring the table-factory into our namespace. Example Usage: (define_images_table,) = from_migration_import( '001_add_images_table', ['define_images_table']) images = define_images_table(meta) # Refer to images table """ module_path = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name module = __import__(module_path, globals(), locals(), fromlist, 0) return [getattr(module, item) for item in fromlist] def create_tables(tables): for table in tables: LOG.info(_LI("creating table %(table)s"), {'table': table}) table.create() def drop_tables(tables): for table in tables: LOG.info(_LI("dropping table %(table)s"), {'table': table}) table.drop() glance-12.0.0/glance/db/sqlalchemy/migrate_repo/__init__.py0000664000567000056710000000000012701407047024722 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056710000000174112701407047024737 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=Glance Migrations # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] glance-12.0.0/glance/db/sqlalchemy/models_metadef.py0000664000567000056710000001575512701407047023505 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" SQLAlchemy models for glance metadata schema """ from oslo_db.sqlalchemy import models from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy.orm import relationship from sqlalchemy import String from sqlalchemy import Text from sqlalchemy import UniqueConstraint from glance.common import timeutils from glance.db.sqlalchemy.models import JSONEncodedDict class DictionaryBase(models.ModelBase): metadata = None def to_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d BASE_DICT = declarative_base(cls=DictionaryBase) class GlanceMetadefBase(models.TimestampMixin): """Base class for Glance Metadef Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False __protected_attributes__ = set(["created_at", "updated_at"]) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) # TODO(wko): Column `updated_at` have no default value in # OpenStack common code. We should decide, is this value # required and make changes in oslo (if required) or # in glance (if not). 
updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=True, onupdate=lambda: timeutils.utcnow()) class MetadefNamespace(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace in the datastore.""" __tablename__ = 'metadef_namespaces' __table_args__ = (UniqueConstraint('namespace', name='uq_metadef_namespaces' '_namespace'), Index('ix_metadef_namespaces_owner', 'owner') ) id = Column(Integer, primary_key=True, nullable=False) namespace = Column(String(80), nullable=False) display_name = Column(String(80)) description = Column(Text()) visibility = Column(String(32)) protected = Column(Boolean) owner = Column(String(255), nullable=False) class MetadefObject(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema object in the datastore.""" __tablename__ = 'metadef_objects' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_objects_namespace_id' '_name'), Index('ix_metadef_objects_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) description = Column(Text()) required = Column(Text()) json_schema = Column(JSONEncodedDict(), default={}, nullable=False) class MetadefProperty(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace-property in the datastore.""" __tablename__ = 'metadef_properties' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_properties_namespace' '_id_name'), Index('ix_metadef_properties_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) json_schema = Column(JSONEncodedDict(), default={}, nullable=False) class MetadefNamespaceResourceType(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema namespace-property in the datastore.""" 
__tablename__ = 'metadef_namespace_resource_types' __table_args__ = (Index('ix_metadef_ns_res_types_namespace_id', 'namespace_id'), ) resource_type_id = Column(Integer, ForeignKey('metadef_resource_types.id'), primary_key=True, nullable=False) namespace_id = Column(Integer, ForeignKey('metadef_namespaces.id'), primary_key=True, nullable=False) properties_target = Column(String(80)) prefix = Column(String(80)) class MetadefResourceType(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema resource type in the datastore.""" __tablename__ = 'metadef_resource_types' __table_args__ = (UniqueConstraint('name', name='uq_metadef_resource_types_name'), ) id = Column(Integer, primary_key=True, nullable=False) name = Column(String(80), nullable=False) protected = Column(Boolean, nullable=False, default=False) associations = relationship( "MetadefNamespaceResourceType", primaryjoin=id == MetadefNamespaceResourceType.resource_type_id) class MetadefTag(BASE_DICT, GlanceMetadefBase): """Represents a metadata-schema tag in the data store.""" __tablename__ = 'metadef_tags' __table_args__ = (UniqueConstraint('namespace_id', 'name', name='uq_metadef_tags_namespace_id' '_name'), Index('ix_metadef_tags_name', 'name') ) id = Column(Integer, primary_key=True, nullable=False) namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), nullable=False) name = Column(String(80), nullable=False) def register_models(engine): """Create database tables for all models with the given engine.""" models = (MetadefNamespace, MetadefObject, MetadefProperty, MetadefTag, MetadefResourceType, MetadefNamespaceResourceType) for model in models: model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (MetadefObject, MetadefProperty, MetadefNamespaceResourceType, MetadefTag, MetadefNamespace, MetadefResourceType) for model in models: model.metadata.drop_all(engine) 
glance-12.0.0/glance/db/sqlalchemy/__init__.py0000664000567000056710000000000012701407047022245 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/models_glare.py0000664000567000056710000003156312701407047023165 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_db.sqlalchemy import models from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext import declarative from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import Numeric from sqlalchemy.orm import backref from sqlalchemy.orm import composite from sqlalchemy.orm import relationship from sqlalchemy import String from sqlalchemy import Text from glance.common import semver_db from glance.common import timeutils import glance.glare as ga BASE = declarative.declarative_base() class ArtifactBase(models.ModelBase, models.TimestampMixin): """Base class for Artifact Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False __protected_attributes__ = set([ "created_at", "updated_at"]) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False, onupdate=lambda: timeutils.utcnow()) def save(self, session=None): from 
glance.db.sqlalchemy import api as db_api super(ArtifactBase, self).save(session or db_api.get_session()) def keys(self): return self.__dict__.keys() def values(self): return self.__dict__.values() def items(self): return self.__dict__.items() def to_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d def _parse_property_type_value(prop, show_text_properties=True): columns = [ 'int_value', 'string_value', 'bool_value', 'numeric_value'] if show_text_properties: columns.append('text_value') for prop_type in columns: if getattr(prop, prop_type) is not None: return prop_type.rpartition('_')[0], getattr(prop, prop_type) return None, None class Artifact(BASE, ArtifactBase): __tablename__ = 'artifacts' __table_args__ = ( Index('ix_artifact_name_and_version', 'name', 'version_prefix', 'version_suffix'), Index('ix_artifact_type', 'type_name', 'type_version_prefix', 'type_version_suffix'), Index('ix_artifact_state', 'state'), Index('ix_artifact_owner', 'owner'), Index('ix_artifact_visibility', 'visibility'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) __protected_attributes__ = ArtifactBase.__protected_attributes__.union( set(['published_at', 'deleted_at'])) id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) name = Column(String(255), nullable=False) type_name = Column(String(255), nullable=False) type_version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"), nullable=False) type_version_suffix = Column(String(255)) type_version_meta = Column(String(255)) type_version = composite(semver_db.DBVersion, type_version_prefix, type_version_suffix, type_version_meta, comparator_factory=semver_db.VersionComparator) version_prefix = Column(BigInteger().with_variant(Integer, "sqlite"), nullable=False) version_suffix = Column(String(255)) version_meta = Column(String(255)) version = composite(semver_db.DBVersion, version_prefix, version_suffix, version_meta, comparator_factory=semver_db.VersionComparator) 
description = Column(Text) visibility = Column(String(32), nullable=False) state = Column(String(32), nullable=False) owner = Column(String(255), nullable=False) published_at = Column(DateTime) deleted_at = Column(DateTime) def to_dict(self, show_level=ga.Showlevel.BASIC, show_text_properties=True): d = super(Artifact, self).to_dict() d.pop('type_version_prefix') d.pop('type_version_suffix') d.pop('type_version_meta') d.pop('version_prefix') d.pop('version_suffix') d.pop('version_meta') d['type_version'] = str(self.type_version) d['version'] = str(self.version) tags = [] for tag in self.tags: tags.append(tag.value) d['tags'] = tags if show_level == ga.Showlevel.NONE: return d properties = {} # sort properties self.properties.sort(key=lambda elem: (elem.name, elem.position)) for prop in self.properties: proptype, propvalue = _parse_property_type_value( prop, show_text_properties) if proptype is None: continue if prop.position is not None: # make array for p in properties.keys(): if p == prop.name: # add value to array properties[p]['value'].append(dict(type=proptype, value=propvalue)) break else: # create new array p = dict(type='array', value=[]) p['value'].append(dict(type=proptype, value=propvalue)) properties[prop.name] = p else: # make scalar properties[prop.name] = dict(type=proptype, value=propvalue) d['properties'] = properties blobs = {} # sort blobs self.blobs.sort(key=lambda elem: elem.position) for blob in self.blobs: locations = [] # sort locations blob.locations.sort(key=lambda elem: elem.position) for loc in blob.locations: locations.append(dict(value=loc.value, status=loc.status)) if blob.name in blobs: blobs[blob.name].append(dict(size=blob.size, checksum=blob.checksum, locations=locations, item_key=blob.item_key)) else: blobs[blob.name] = [] blobs[blob.name].append(dict(size=blob.size, checksum=blob.checksum, locations=locations, item_key=blob.item_key)) d['blobs'] = blobs return d class ArtifactDependency(BASE, ArtifactBase): __tablename__ = 
'artifact_dependencies' __table_args__ = (Index('ix_artifact_dependencies_source_id', 'artifact_source'), Index('ix_artifact_dependencies_origin_id', 'artifact_origin'), Index('ix_artifact_dependencies_dest_id', 'artifact_dest'), Index('ix_artifact_dependencies_direct_dependencies', 'artifact_source', 'is_direct'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) id = Column(String(36), primary_key=True, nullable=False, default=lambda: str(uuid.uuid4())) artifact_source = Column(String(36), ForeignKey('artifacts.id'), nullable=False) artifact_dest = Column(String(36), ForeignKey('artifacts.id'), nullable=False) artifact_origin = Column(String(36), ForeignKey('artifacts.id'), nullable=False) is_direct = Column(Boolean, nullable=False) position = Column(Integer) name = Column(String(36)) source = relationship('Artifact', backref=backref('dependencies', cascade="all, " "delete"), foreign_keys="ArtifactDependency.artifact_source") dest = relationship('Artifact', foreign_keys="ArtifactDependency.artifact_dest") origin = relationship('Artifact', foreign_keys="ArtifactDependency.artifact_origin") class ArtifactTag(BASE, ArtifactBase): __tablename__ = 'artifact_tags' __table_args__ = (Index('ix_artifact_tags_artifact_id', 'artifact_id'), Index('ix_artifact_tags_artifact_id_tag_value', 'artifact_id', 'value'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, nullable=False, default=lambda: str(uuid.uuid4())) artifact_id = Column(String(36), ForeignKey('artifacts.id'), nullable=False) artifact = relationship(Artifact, backref=backref('tags', cascade="all, delete-orphan")) value = Column(String(255), nullable=False) class ArtifactProperty(BASE, ArtifactBase): __tablename__ = 'artifact_properties' __table_args__ = ( Index('ix_artifact_properties_artifact_id', 'artifact_id'), Index('ix_artifact_properties_name', 'name'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, 
nullable=False, default=lambda: str(uuid.uuid4())) artifact_id = Column(String(36), ForeignKey('artifacts.id'), nullable=False) artifact = relationship(Artifact, backref=backref('properties', cascade="all, delete-orphan")) name = Column(String(255), nullable=False) string_value = Column(String(255)) int_value = Column(Integer) numeric_value = Column(Numeric) bool_value = Column(Boolean) text_value = Column(Text) position = Column(Integer) class ArtifactBlob(BASE, ArtifactBase): __tablename__ = 'artifact_blobs' __table_args__ = ( Index('ix_artifact_blobs_artifact_id', 'artifact_id'), Index('ix_artifact_blobs_name', 'name'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'},) id = Column(String(36), primary_key=True, nullable=False, default=lambda: str(uuid.uuid4())) artifact_id = Column(String(36), ForeignKey('artifacts.id'), nullable=False) name = Column(String(255), nullable=False) item_key = Column(String(329)) size = Column(BigInteger().with_variant(Integer, "sqlite"), nullable=False) checksum = Column(String(32)) position = Column(Integer) artifact = relationship(Artifact, backref=backref('blobs', cascade="all, delete-orphan")) class ArtifactBlobLocation(BASE, ArtifactBase): __tablename__ = 'artifact_blob_locations' __table_args__ = (Index('ix_artifact_blob_locations_blob_id', 'blob_id'), {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}) id = Column(String(36), primary_key=True, nullable=False, default=lambda: str(uuid.uuid4())) blob_id = Column(String(36), ForeignKey('artifact_blobs.id'), nullable=False) value = Column(Text, nullable=False) position = Column(Integer) status = Column(String(36), default='active', nullable=True) blob = relationship(ArtifactBlob, backref=backref('locations', cascade="all, delete-orphan")) def register_models(engine): """Create database tables for all models with the given engine.""" models = (Artifact, ArtifactTag, ArtifactProperty, ArtifactBlob, ArtifactBlobLocation, ArtifactDependency) for model in models: 
model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (ArtifactDependency, ArtifactBlobLocation, ArtifactBlob, ArtifactProperty, ArtifactTag, Artifact) for model in models: model.metadata.drop_all(engine) glance-12.0.0/glance/db/sqlalchemy/metadata.py0000664000567000056710000004201512701407047022302 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os from os.path import isfile from os.path import join import re from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six import sqlalchemy from sqlalchemy import and_ from sqlalchemy.schema import MetaData from sqlalchemy.sql import select from glance.common import timeutils from glance.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) metadata_opts = [ cfg.StrOpt('metadata_source_path', default='/etc/glance/metadefs/', help=_('Path to the directory where json metadata ' 'files are stored')) ] CONF = cfg.CONF CONF.register_opts(metadata_opts) def get_metadef_namespaces_table(meta): return sqlalchemy.Table('metadef_namespaces', meta, autoload=True) def get_metadef_resource_types_table(meta): return sqlalchemy.Table('metadef_resource_types', meta, autoload=True) def get_metadef_namespace_resource_types_table(meta): return sqlalchemy.Table('metadef_namespace_resource_types', meta, autoload=True) def get_metadef_properties_table(meta): return sqlalchemy.Table('metadef_properties', meta, autoload=True) def get_metadef_objects_table(meta): return sqlalchemy.Table('metadef_objects', meta, autoload=True) def get_metadef_tags_table(meta): return sqlalchemy.Table('metadef_tags', meta, autoload=True) def _get_resource_type_id(meta, name): rt_table = get_metadef_resource_types_table(meta) resource_type = ( select([rt_table.c.id]). where(rt_table.c.name == name). select_from(rt_table). execute().fetchone()) if resource_type: return resource_type[0] return None def _get_resource_type(meta, resource_type_id): rt_table = get_metadef_resource_types_table(meta) return ( rt_table.select(). where(rt_table.c.id == resource_type_id). execute().fetchone()) def _get_namespace_resource_types(meta, namespace_id): namespace_resource_types_table = ( get_metadef_namespace_resource_types_table(meta)) return ( namespace_resource_types_table.select(). 
where(namespace_resource_types_table.c.namespace_id == namespace_id). execute().fetchall()) def _get_namespace_resource_type_by_ids(meta, namespace_id, rt_id): namespace_resource_types_table = ( get_metadef_namespace_resource_types_table(meta)) return ( namespace_resource_types_table.select(). where(and_( namespace_resource_types_table.c.namespace_id == namespace_id, namespace_resource_types_table.c.resource_type_id == rt_id)). execute().fetchone()) def _get_properties(meta, namespace_id): properties_table = get_metadef_properties_table(meta) return ( properties_table.select(). where(properties_table.c.namespace_id == namespace_id). execute().fetchall()) def _get_objects(meta, namespace_id): objects_table = get_metadef_objects_table(meta) return ( objects_table.select(). where(objects_table.c.namespace_id == namespace_id). execute().fetchall()) def _get_tags(meta, namespace_id): tags_table = get_metadef_tags_table(meta) return ( tags_table.select(). where(tags_table.c.namespace_id == namespace_id). execute().fetchall()) def _get_resource_id(table, namespace_id, resource_name): resource = ( select([table.c.id]). where(and_(table.c.namespace_id == namespace_id, table.c.name == resource_name)). select_from(table). 
execute().fetchone()) if resource: return resource[0] return None def _clear_metadata(meta): metadef_tables = [get_metadef_properties_table(meta), get_metadef_objects_table(meta), get_metadef_tags_table(meta), get_metadef_namespace_resource_types_table(meta), get_metadef_namespaces_table(meta), get_metadef_resource_types_table(meta)] for table in metadef_tables: table.delete().execute() LOG.info(_LI("Table %s has been cleared"), table) def _clear_namespace_metadata(meta, namespace_id): metadef_tables = [get_metadef_properties_table(meta), get_metadef_objects_table(meta), get_metadef_tags_table(meta), get_metadef_namespace_resource_types_table(meta)] namespaces_table = get_metadef_namespaces_table(meta) for table in metadef_tables: table.delete().where(table.c.namespace_id == namespace_id).execute() namespaces_table.delete().where( namespaces_table.c.id == namespace_id).execute() def _populate_metadata(meta, metadata_path=None, merge=False, prefer_new=False, overwrite=False): if not metadata_path: metadata_path = CONF.metadata_source_path try: if isfile(metadata_path): json_schema_files = [metadata_path] else: json_schema_files = [f for f in os.listdir(metadata_path) if isfile(join(metadata_path, f)) and f.endswith('.json')] except OSError as e: LOG.error(encodeutils.exception_to_unicode(e)) return if not json_schema_files: LOG.error(_LE("Json schema files not found in %s. 
Aborting."), metadata_path) return namespaces_table = get_metadef_namespaces_table(meta) namespace_rt_table = get_metadef_namespace_resource_types_table(meta) objects_table = get_metadef_objects_table(meta) tags_table = get_metadef_tags_table(meta) properties_table = get_metadef_properties_table(meta) resource_types_table = get_metadef_resource_types_table(meta) for json_schema_file in json_schema_files: try: file = join(metadata_path, json_schema_file) with open(file) as json_file: metadata = json.load(json_file) except Exception as e: LOG.error(_LE("Failed to parse json file %(file_path)s while " "populating metadata due to: %(error_msg)s"), {"file_path": file, "error_msg": encodeutils.exception_to_unicode(e)}) continue values = { 'namespace': metadata.get('namespace', None), 'display_name': metadata.get('display_name', None), 'description': metadata.get('description', None), 'visibility': metadata.get('visibility', None), 'protected': metadata.get('protected', None), 'owner': metadata.get('owner', 'admin') } db_namespace = select( [namespaces_table.c.id] ).where( namespaces_table.c.namespace == values['namespace'] ).select_from( namespaces_table ).execute().fetchone() if db_namespace and overwrite: LOG.info(_LI("Overwriting namespace %s"), values['namespace']) _clear_namespace_metadata(meta, db_namespace[0]) db_namespace = None if not db_namespace: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(namespaces_table, values) db_namespace = select( [namespaces_table.c.id] ).where( namespaces_table.c.namespace == values['namespace'] ).select_from( namespaces_table ).execute().fetchone() elif not merge: LOG.info(_LI("Skipping namespace %s. 
It already exists in the " "database."), values['namespace']) continue elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(namespaces_table, values, namespaces_table.c.id, db_namespace[0]) namespace_id = db_namespace[0] for resource_type in metadata.get('resource_type_associations', []): rt_id = _get_resource_type_id(meta, resource_type['name']) if not rt_id: val = { 'name': resource_type['name'], 'created_at': timeutils.utcnow(), 'protected': True } _insert_data_to_db(resource_types_table, val) rt_id = _get_resource_type_id(meta, resource_type['name']) elif prefer_new: val = {'updated_at': timeutils.utcnow()} _update_data_in_db(resource_types_table, val, resource_types_table.c.id, rt_id) values = { 'namespace_id': namespace_id, 'resource_type_id': rt_id, 'properties_target': resource_type.get( 'properties_target', None), 'prefix': resource_type.get('prefix', None) } namespace_resource_type = _get_namespace_resource_type_by_ids( meta, namespace_id, rt_id) if not namespace_resource_type: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(namespace_rt_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_rt_association(namespace_rt_table, values, rt_id, namespace_id) for property, schema in six.iteritems(metadata.get('properties', {})): values = { 'name': property, 'namespace_id': namespace_id, 'json_schema': json.dumps(schema) } property_id = _get_resource_id(properties_table, namespace_id, property) if not property_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(properties_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(properties_table, values, properties_table.c.id, property_id) for object in metadata.get('objects', []): values = { 'name': object['name'], 'description': object.get('description', None), 'namespace_id': namespace_id, 'json_schema': json.dumps( object.get('properties', None)) } 
object_id = _get_resource_id(objects_table, namespace_id, object['name']) if not object_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(objects_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(objects_table, values, objects_table.c.id, object_id) for tag in metadata.get('tags', []): values = { 'name': tag.get('name'), 'namespace_id': namespace_id, } tag_id = _get_resource_id(tags_table, namespace_id, tag['name']) if not tag_id: values.update({'created_at': timeutils.utcnow()}) _insert_data_to_db(tags_table, values) elif prefer_new: values.update({'updated_at': timeutils.utcnow()}) _update_data_in_db(tags_table, values, tags_table.c.id, tag_id) LOG.info(_LI("File %s loaded to database."), file) LOG.info(_LI("Metadata loading finished")) def _insert_data_to_db(table, values, log_exception=True): try: table.insert(values=values).execute() except sqlalchemy.exc.IntegrityError: if log_exception: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _update_data_in_db(table, values, column, value): try: (table.update(values=values). where(column == value).execute()) except sqlalchemy.exc.IntegrityError: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _update_rt_association(table, values, rt_id, namespace_id): try: (table.update(values=values). 
where(and_(table.c.resource_type_id == rt_id, table.c.namespace_id == namespace_id)).execute()) except sqlalchemy.exc.IntegrityError: LOG.warning(_LW("Duplicate entry for values: %s"), values) def _export_data_to_file(meta, path): if not path: path = CONF.metadata_source_path namespace_table = get_metadef_namespaces_table(meta) namespaces = namespace_table.select().execute().fetchall() pattern = re.compile('[\W_]+', re.UNICODE) for id, namespace in enumerate(namespaces, start=1): namespace_id = namespace['id'] namespace_file_name = pattern.sub('', namespace['display_name']) values = { 'namespace': namespace['namespace'], 'display_name': namespace['display_name'], 'description': namespace['description'], 'visibility': namespace['visibility'], 'protected': namespace['protected'], 'resource_type_associations': [], 'properties': {}, 'objects': [], 'tags': [] } namespace_resource_types = _get_namespace_resource_types(meta, namespace_id) db_objects = _get_objects(meta, namespace_id) db_properties = _get_properties(meta, namespace_id) db_tags = _get_tags(meta, namespace_id) resource_types = [] for namespace_resource_type in namespace_resource_types: resource_type = _get_resource_type( meta, namespace_resource_type['resource_type_id']) resource_types.append({ 'name': resource_type['name'], 'prefix': namespace_resource_type['prefix'], 'properties_target': namespace_resource_type[ 'properties_target'] }) values.update({ 'resource_type_associations': resource_types }) objects = [] for object in db_objects: objects.append({ "name": object['name'], "description": object['description'], "properties": json.loads(object['json_schema']) }) values.update({ 'objects': objects }) properties = {} for property in db_properties: properties.update({ property['name']: json.loads(property['json_schema']) }) values.update({ 'properties': properties }) tags = [] for tag in db_tags: tags.append({ "name": tag['name'] }) values.update({ 'tags': tags }) try: file_name = ''.join([path, 
namespace_file_name, '.json']) with open(file_name, 'w') as json_file: json_file.write(json.dumps(values)) except Exception as e: LOG.exception(encodeutils.exception_to_unicode(e)) LOG.info(_LI("Namespace %(namespace)s saved in %(file)s"), { 'namespace': namespace_file_name, 'file': file_name}) def db_load_metadefs(engine, metadata_path=None, merge=False, prefer_new=False, overwrite=False): meta = MetaData() meta.bind = engine if not merge and (prefer_new or overwrite): LOG.error(_LE("To use --prefer_new or --overwrite you need to combine " "of these options with --merge option.")) return if prefer_new and overwrite and merge: LOG.error(_LE("Please provide no more than one option from this list: " "--prefer_new, --overwrite")) return _populate_metadata(meta, metadata_path, merge, prefer_new, overwrite) def db_unload_metadefs(engine): meta = MetaData() meta.bind = engine _clear_metadata(meta) def db_export_metadefs(engine, metadata_path=None): meta = MetaData() meta.bind = engine _export_data_to_file(meta, metadata_path) glance-12.0.0/glance/db/sqlalchemy/glare.py0000664000567000056710000006726012701407047021625 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import operator import uuid from enum import Enum from oslo_config import cfg from oslo_db import exception as db_exc import sqlalchemy from sqlalchemy import and_ from sqlalchemy import case from sqlalchemy import or_ import sqlalchemy.orm as orm from sqlalchemy.orm import joinedload from glance.common import exception from glance.common import semver_db from glance.common import timeutils from glance.db.sqlalchemy import models_glare as models import glance.glare as ga from glance.i18n import _LE, _LW from oslo_log import log as os_logging LOG = os_logging.getLogger(__name__) CONF = cfg.CONF class Visibility(Enum): PRIVATE = 'private' PUBLIC = 'public' SHARED = 'shared' class State(Enum): CREATING = 'creating' ACTIVE = 'active' DEACTIVATED = 'deactivated' DELETED = 'deleted' TRANSITIONS = { State.CREATING: [State.ACTIVE, State.DELETED], State.ACTIVE: [State.DEACTIVATED, State.DELETED], State.DEACTIVATED: [State.ACTIVE, State.DELETED], State.DELETED: [] } def create(context, values, session, type_name, type_version=None): return _out(_create_or_update(context, values, None, session, type_name, type_version)) def update(context, values, artifact_id, session, type_name, type_version=None): return _out(_create_or_update(context, values, artifact_id, session, type_name, type_version)) def delete(context, artifact_id, session, type_name, type_version=None): values = {'state': 'deleted'} return _out(_create_or_update(context, values, artifact_id, session, type_name, type_version)) def _create_or_update(context, values, artifact_id, session, type_name, type_version=None): values = copy.deepcopy(values) with session.begin(): _set_version_fields(values) _validate_values(values) _drop_protected_attrs(models.Artifact, values) if artifact_id: # update existing artifact state = values.get('state') show_level = ga.Showlevel.BASIC if state is not None: if state == 'active': show_level = ga.Showlevel.DIRECT values['published_at'] = timeutils.utcnow() if state == 
'deleted': values['deleted_at'] = timeutils.utcnow() artifact = _get(context, artifact_id, session, type_name, type_version, show_level=show_level) _validate_transition(artifact.state, values.get('state') or artifact.state) else: # create new artifact artifact = models.Artifact() if 'id' not in values: artifact.id = str(uuid.uuid4()) else: artifact.id = values['id'] if 'tags' in values: tags = values.pop('tags') artifact.tags = _do_tags(artifact, tags) if 'properties' in values: properties = values.pop('properties', {}) artifact.properties = _do_properties(artifact, properties) if 'blobs' in values: blobs = values.pop('blobs') artifact.blobs = _do_blobs(artifact, blobs) if 'dependencies' in values: dependencies = values.pop('dependencies') _do_dependencies(artifact, dependencies, session) if values.get('state', None) == 'publish': artifact.dependencies.extend( _do_transitive_dependencies(artifact, session)) artifact.update(values) try: artifact.save(session=session) except db_exc.DBDuplicateEntry: LOG.warn(_LW("Artifact with the specified type, name and version " "already exists")) raise exception.ArtifactDuplicateNameTypeVersion() return artifact def get(context, artifact_id, session, type_name=None, type_version=None, show_level=ga.Showlevel.BASIC): artifact = _get(context, artifact_id, session, type_name, type_version, show_level) return _out(artifact, show_level) def publish(context, artifact_id, session, type_name, type_version=None): """ Because transitive dependencies are not initially created it has to be done manually by calling this function. It creates transitive dependencies for the given artifact_id and saves them in DB. 
:returns: artifact dict with Transitive show level """ values = {'state': 'active'} return _out(_create_or_update(context, values, artifact_id, session, type_name, type_version)) def _validate_transition(source_state, target_state): if target_state == source_state: return try: source_state = State(source_state) target_state = State(target_state) except ValueError: raise exception.InvalidArtifactStateTransition(source=source_state, target=target_state) if (source_state not in TRANSITIONS or target_state not in TRANSITIONS[source_state]): raise exception.InvalidArtifactStateTransition(source=source_state, target=target_state) def _out(artifact, show_level=ga.Showlevel.BASIC, show_text_properties=True): """ Transforms sqlalchemy object into dict depending on the show level. :param artifact: sql :param show_level: constant from Showlevel class :param show_text_properties: for performance optimization it's possible to disable loading of massive text properties :returns: generated dict """ res = artifact.to_dict(show_level=show_level, show_text_properties=show_text_properties) if show_level >= ga.Showlevel.DIRECT: dependencies = artifact.dependencies dependencies.sort(key=lambda elem: (elem.artifact_origin, elem.name, elem.position)) res['dependencies'] = {} if show_level == ga.Showlevel.DIRECT: new_show_level = ga.Showlevel.BASIC else: new_show_level = ga.Showlevel.TRANSITIVE for dep in dependencies: if dep.artifact_origin == artifact.id: # make array for p in res['dependencies'].keys(): if p == dep.name: # add value to array res['dependencies'][p].append( _out(dep.dest, new_show_level)) break else: # create new array deparr = [_out(dep.dest, new_show_level)] res['dependencies'][dep.name] = deparr return res def _get(context, artifact_id, session, type_name=None, type_version=None, show_level=ga.Showlevel.BASIC): values = dict(id=artifact_id) if type_name is not None: values['type_name'] = type_name if type_version is not None: values['type_version'] = type_version 
_set_version_fields(values) try: if show_level == ga.Showlevel.NONE: query = ( session.query(models.Artifact). options(joinedload(models.Artifact.tags)). filter_by(**values)) else: query = ( session.query(models.Artifact). options(joinedload(models.Artifact.properties)). options(joinedload(models.Artifact.tags)). options(joinedload(models.Artifact.blobs). joinedload(models.ArtifactBlob.locations)). filter_by(**values)) artifact = query.one() except orm.exc.NoResultFound: LOG.warn(_LW("Artifact with id=%s not found") % artifact_id) raise exception.ArtifactNotFound(id=artifact_id) if not _check_visibility(context, artifact): LOG.warn(_LW("Artifact with id=%s is not accessible") % artifact_id) raise exception.ArtifactForbidden(id=artifact_id) return artifact def get_all(context, session, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, show_level=ga.Showlevel.NONE): """List all visible artifacts""" filters = filters or {} artifacts = _get_all( context, session, filters, marker, limit, sort_keys, sort_dirs, show_level) return map(lambda ns: _out(ns, show_level, show_text_properties=False), artifacts) def _get_all(context, session, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, show_level=ga.Showlevel.NONE): """Get all namespaces that match zero or more filters. :param filters: dict of filter keys and values. 
:param marker: namespace id after which to start page :param limit: maximum number of namespaces to return :param sort_keys: namespace attributes by which results should be sorted :param sort_dirs: directions in which results should be sorted (asc, desc) """ filters = filters or {} query = _do_artifacts_query(context, session, show_level) basic_conds, tag_conds, prop_conds = _do_query_filters(filters) if basic_conds: for basic_condition in basic_conds: query = query.filter(and_(*basic_condition)) if tag_conds: for tag_condition in tag_conds: query = query.join(models.ArtifactTag, aliased=True).filter( and_(*tag_condition)) if prop_conds: for prop_condition in prop_conds: query = query.join(models.ArtifactProperty, aliased=True).filter( and_(*prop_condition)) marker_artifact = None if marker is not None: marker_artifact = _get(context, marker, session, None, None) if sort_keys is None: sort_keys = [('created_at', None), ('id', None)] sort_dirs = ['desc', 'desc'] else: for key in [('created_at', None), ('id', None)]: if key not in sort_keys: sort_keys.append(key) sort_dirs.append('desc') # Note(mfedosin): Workaround to deal with situation that sqlalchemy cannot # work with composite keys correctly if ('version', None) in sort_keys: i = sort_keys.index(('version', None)) version_sort_dir = sort_dirs[i] sort_keys[i:i + 1] = [('version_prefix', None), ('version_suffix', None), ('version_meta', None)] sort_dirs[i:i + 1] = [version_sort_dir] * 3 query = _do_paginate_query(query=query, limit=limit, sort_keys=sort_keys, marker=marker_artifact, sort_dirs=sort_dirs) return query.all() def _do_paginate_query(query, sort_keys=None, sort_dirs=None, marker=None, limit=None): # Default the sort direction to ascending sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir] * len(sort_keys) assert(len(sort_dirs) == len(sort_keys)) # nosec # nosec: This function runs safely if the assertion fails. 
if len(sort_dirs) < len(sort_keys): sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): try: sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] except KeyError: raise ValueError(_LE("Unknown sort direction, " "must be 'desc' or 'asc'")) if current_sort_key[1] is None: # sort by generic property query = query.order_by(sort_dir_func(getattr( models.Artifact, current_sort_key[0]))) else: # sort by custom property prop_type = current_sort_key[1] + "_value" query = ( query.join(models.ArtifactProperty). filter(models.ArtifactProperty.name == current_sort_key[0]). order_by(sort_dir_func(getattr(models.ArtifactProperty, prop_type)))) default = '' # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key[0]) if v is None: v = default marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(sort_keys)): crit_attrs = [] if marker_values[i] is None: continue for j in range(i): if sort_keys[j][1] is None: model_attr = getattr(models.Artifact, sort_keys[j][0]) else: model_attr = getattr(models.ArtifactProperty, sort_keys[j][1] + "_value") default = None if isinstance( model_attr.property.columns[0].type, sqlalchemy.DateTime) else '' attr = case([(model_attr != None, model_attr), ], else_=default) crit_attrs.append((attr == marker_values[j])) if sort_keys[i][1] is None: model_attr = getattr(models.Artifact, sort_keys[i][0]) else: model_attr = getattr(models.ArtifactProperty, sort_keys[i][1] + "_value") default = None if isinstance(model_attr.property.columns[0].type, sqlalchemy.DateTime) else '' attr = case([(model_attr != None, model_attr), ], else_=default) if sort_dirs[i] == 'desc': crit_attrs.append((attr < marker_values[i])) else: crit_attrs.append((attr > marker_values[i])) criteria = and_(*crit_attrs) criteria_list.append(criteria) 
f = or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query def _do_artifacts_query(context, session, show_level=ga.Showlevel.NONE): """Build the query to get all artifacts based on the context""" LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s", {'is_admin': context.is_admin, 'owner': context.owner}) if show_level == ga.Showlevel.NONE: query = session.query(models.Artifact).options( joinedload(models.Artifact.tags)) elif show_level == ga.Showlevel.BASIC: query = ( session.query(models.Artifact). options(joinedload( models.Artifact.properties). defer(models.ArtifactProperty.text_value)). options(joinedload(models.Artifact.tags)). options(joinedload(models.Artifact.blobs). joinedload(models.ArtifactBlob.locations))) else: # other show_levels aren't supported msg = _LW("Show level %s is not supported in this " "operation") % ga.Showlevel.to_str(show_level) LOG.warn(msg) raise exception.ArtifactUnsupportedShowLevel(shl=show_level) # If admin, return everything. if context.is_admin: return query else: # If regular user, return only public artifacts. # However, if context.owner has a value, return both # public and private artifacts of the context.owner. if context.owner is not None: query = query.filter( or_(models.Artifact.owner == context.owner, models.Artifact.visibility == 'public')) else: query = query.filter( models.Artifact.visibility == 'public') return query op_mappings = { 'EQ': operator.eq, 'GT': operator.gt, 'GE': operator.ge, 'LT': operator.lt, 'LE': operator.le, 'NE': operator.ne, 'IN': operator.eq # it must be eq } def _do_query_filters(filters): basic_conds = [] tag_conds = [] prop_conds = [] # don't show deleted artifacts basic_conds.append([models.Artifact.state != 'deleted']) visibility = filters.pop('visibility', None) if visibility is not None: # ignore operator. 
always consider it EQ basic_conds.append( [models.Artifact.visibility == visibility[0]['value']]) type_name = filters.pop('type_name', None) if type_name is not None: # ignore operator. always consider it EQ basic_conds.append([models.Artifact.type_name == type_name['value']]) type_version = filters.pop('type_version', None) if type_version is not None: # ignore operator. always consider it EQ # TODO(mfedosin) add support of LIKE operator type_version = semver_db.parse(type_version['value']) basic_conds.append([models.Artifact.type_version == type_version]) name = filters.pop('name', None) if name is not None: # ignore operator. always consider it EQ basic_conds.append([models.Artifact.name == name[0]['value']]) versions = filters.pop('version', None) if versions is not None: for version in versions: value = semver_db.parse(version['value']) op = version['operator'] fn = op_mappings[op] basic_conds.append([fn(models.Artifact.version, value)]) state = filters.pop('state', None) if state is not None: # ignore operator. always consider it EQ basic_conds.append([models.Artifact.state == state['value']]) owner = filters.pop('owner', None) if owner is not None: # ignore operator. 
always consider it EQ basic_conds.append([models.Artifact.owner == owner[0]['value']]) id_list = filters.pop('id_list', None) if id_list is not None: basic_conds.append([models.Artifact.id.in_(id_list['value'])]) name_list = filters.pop('name_list', None) if name_list is not None: basic_conds.append([models.Artifact.name.in_(name_list['value'])]) tags = filters.pop('tags', None) if tags is not None: for tag in tags: tag_conds.append([models.ArtifactTag.value == tag['value']]) # process remaining filters for filtername, filtervalues in filters.items(): for filtervalue in filtervalues: db_prop_op = filtervalue['operator'] db_prop_value = filtervalue['value'] db_prop_type = filtervalue['type'] + "_value" db_prop_position = filtervalue.get('position') conds = [models.ArtifactProperty.name == filtername] if db_prop_op in op_mappings: fn = op_mappings[db_prop_op] result = fn(getattr(models.ArtifactProperty, db_prop_type), db_prop_value) cond = [result] if db_prop_position is not 'any': cond.append( models.ArtifactProperty.position == db_prop_position) if db_prop_op == 'IN': if (db_prop_position is not None and db_prop_position is not 'any'): msg = _LE("Cannot use this parameter with " "the operator IN") LOG.error(msg) raise exception.ArtifactInvalidPropertyParameter( op='IN') cond = [result, models.ArtifactProperty.position >= 0] else: msg = _LE("Operator %s is not supported") % db_prop_op LOG.error(msg) raise exception.ArtifactUnsupportedPropertyOperator( op=db_prop_op) conds.extend(cond) prop_conds.append(conds) return basic_conds, tag_conds, prop_conds def _do_tags(artifact, new_tags): tags_to_update = [] # don't touch existing tags for tag in artifact.tags: if tag.value in new_tags: tags_to_update.append(tag) new_tags.remove(tag.value) # add new tags for tag in new_tags: db_tag = models.ArtifactTag() db_tag.value = tag tags_to_update.append(db_tag) return tags_to_update def _do_property(propname, prop, position=None): db_prop = models.ArtifactProperty() db_prop.name 
= propname setattr(db_prop, (prop['type'] + "_value"), prop['value']) db_prop.position = position return db_prop def _do_properties(artifact, new_properties): props_to_update = [] # don't touch existing properties for prop in artifact.properties: if prop.name not in new_properties: props_to_update.append(prop) for propname, prop in new_properties.items(): if prop['type'] == 'array': for pos, arrprop in enumerate(prop['value']): props_to_update.append( _do_property(propname, arrprop, pos) ) else: props_to_update.append( _do_property(propname, prop) ) return props_to_update def _do_blobs(artifact, new_blobs): blobs_to_update = [] # don't touch existing blobs for blob in artifact.blobs: if blob.name not in new_blobs: blobs_to_update.append(blob) for blobname, blobs in new_blobs.items(): for pos, blob in enumerate(blobs): for db_blob in artifact.blobs: if db_blob.name == blobname and db_blob.position == pos: # update existing blobs db_blob.size = blob['size'] db_blob.checksum = blob['checksum'] db_blob.item_key = blob['item_key'] db_blob.locations = _do_locations(db_blob, blob['locations']) blobs_to_update.append(db_blob) break else: # create new blob db_blob = models.ArtifactBlob() db_blob.name = blobname db_blob.size = blob['size'] db_blob.checksum = blob['checksum'] db_blob.item_key = blob['item_key'] db_blob.position = pos db_blob.locations = _do_locations(db_blob, blob['locations']) blobs_to_update.append(db_blob) return blobs_to_update def _do_locations(blob, new_locations): locs_to_update = [] for pos, loc in enumerate(new_locations): for db_loc in blob.locations: if db_loc.value == loc['value']: # update existing location db_loc.position = pos db_loc.status = loc['status'] locs_to_update.append(db_loc) break else: # create new location db_loc = models.ArtifactBlobLocation() db_loc.value = loc['value'] db_loc.status = loc['status'] db_loc.position = pos locs_to_update.append(db_loc) return locs_to_update def _do_dependencies(artifact, new_dependencies, session): 
deps_to_update = [] # small check that all dependencies are new if artifact.dependencies is not None: for db_dep in artifact.dependencies: for dep in new_dependencies.keys(): if db_dep.name == dep: msg = _LW("Artifact with the specified type, name " "and versions already has the direct " "dependency=%s") % dep LOG.warn(msg) # change values of former dependency for dep in artifact.dependencies: session.delete(dep) artifact.dependencies = [] for depname, depvalues in new_dependencies.items(): for pos, depvalue in enumerate(depvalues): db_dep = models.ArtifactDependency() db_dep.name = depname db_dep.artifact_source = artifact.id db_dep.artifact_dest = depvalue db_dep.artifact_origin = artifact.id db_dep.is_direct = True db_dep.position = pos deps_to_update.append(db_dep) artifact.dependencies = deps_to_update def _do_transitive_dependencies(artifact, session): deps_to_update = [] for dependency in artifact.dependencies: depvalue = dependency.artifact_dest transitdeps = session.query(models.ArtifactDependency).filter_by( artifact_source=depvalue).all() for transitdep in transitdeps: if not transitdep.is_direct: # transitive dependencies are already created msg = _LW("Artifact with the specified type, " "name and version already has the " "direct dependency=%d") % transitdep.id LOG.warn(msg) raise exception.ArtifactDuplicateTransitiveDependency( dep=transitdep.id) db_dep = models.ArtifactDependency() db_dep.name = transitdep['name'] db_dep.artifact_source = artifact.id db_dep.artifact_dest = transitdep.artifact_dest db_dep.artifact_origin = transitdep.artifact_source db_dep.is_direct = False db_dep.position = transitdep.position deps_to_update.append(db_dep) return deps_to_update def _check_visibility(context, artifact): if context.is_admin: return True if not artifact.owner: return True if artifact.visibility == Visibility.PUBLIC.value: return True if artifact.visibility == Visibility.PRIVATE.value: if context.owner and context.owner == artifact.owner: return True 
else: return False if artifact.visibility == Visibility.SHARED.value: return False return False def _set_version_fields(values): if 'type_version' in values: values['type_version'] = semver_db.parse(values['type_version']) if 'version' in values: values['version'] = semver_db.parse(values['version']) def _validate_values(values): if 'state' in values: try: State(values['state']) except ValueError: msg = "Invalid artifact state '%s'" % values['state'] raise exception.Invalid(msg) if 'visibility' in values: try: Visibility(values['visibility']) except ValueError: msg = "Invalid artifact visibility '%s'" % values['visibility'] raise exception.Invalid(msg) # TODO(mfedosin): it's an idea to validate tags someday # (check that all tags match the regexp) def _drop_protected_attrs(model_class, values): """ Removed protected attributes from values dictionary using the models __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] glance-12.0.0/glance/db/sqlalchemy/models.py0000664000567000056710000002322312701407047022005 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" SQLAlchemy models for glance data """ import uuid from oslo_db.sqlalchemy import models from oslo_serialization import jsonutils from sqlalchemy import BigInteger from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy.orm import backref, relationship from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator from sqlalchemy import UniqueConstraint from glance.common import timeutils BASE = declarative_base() class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string""" impl = Text def process_bind_param(self, value, dialect): if value is not None: value = jsonutils.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class GlanceBase(models.ModelBase, models.TimestampMixin): """Base class for Glance Models.""" __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} __table_initialized__ = False __protected_attributes__ = set([ "created_at", "updated_at", "deleted_at", "deleted"]) def save(self, session=None): from glance.db.sqlalchemy import api as db_api super(GlanceBase, self).save(session or db_api.get_session()) created_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=False) # TODO(vsergeyev): Column `updated_at` have no default value in # OpenStack common code. We should decide, is this value # required and make changes in oslo (if required) or # in glance (if not). updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), nullable=True, onupdate=lambda: timeutils.utcnow()) # TODO(boris-42): Use SoftDeleteMixin instead of deleted Column after # migration that provides UniqueConstraints and change # type of this column. 
deleted_at = Column(DateTime) deleted = Column(Boolean, nullable=False, default=False) def delete(self, session=None): """Delete this object.""" self.deleted = True self.deleted_at = timeutils.utcnow() self.save(session=session) def keys(self): return self.__dict__.keys() def values(self): return self.__dict__.values() def items(self): return self.__dict__.items() def to_dict(self): d = self.__dict__.copy() # NOTE(flaper87): Remove # private state instance # It is not serializable # and causes CircularReference d.pop("_sa_instance_state") return d class Image(BASE, GlanceBase): """Represents an image in the datastore.""" __tablename__ = 'images' __table_args__ = (Index('checksum_image_idx', 'checksum'), Index('ix_images_is_public', 'is_public'), Index('ix_images_deleted', 'deleted'), Index('owner_image_idx', 'owner'), Index('created_at_image_idx', 'created_at'), Index('updated_at_image_idx', 'updated_at')) id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) name = Column(String(255)) disk_format = Column(String(20)) container_format = Column(String(20)) size = Column(BigInteger().with_variant(Integer, "sqlite")) virtual_size = Column(BigInteger().with_variant(Integer, "sqlite")) status = Column(String(30), nullable=False) is_public = Column(Boolean, nullable=False, default=False) checksum = Column(String(32)) min_disk = Column(Integer, nullable=False, default=0) min_ram = Column(Integer, nullable=False, default=0) owner = Column(String(255)) protected = Column(Boolean, nullable=False, default=False, server_default=sql.expression.false()) class ImageProperty(BASE, GlanceBase): """Represents an image properties in the datastore.""" __tablename__ = 'image_properties' __table_args__ = (Index('ix_image_properties_image_id', 'image_id'), Index('ix_image_properties_deleted', 'deleted'), UniqueConstraint('image_id', 'name', name='ix_image_properties_' 'image_id_name'),) id = Column(Integer, primary_key=True) image_id = Column(String(36), 
ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('properties')) name = Column(String(255), nullable=False) value = Column(Text) class ImageTag(BASE, GlanceBase): """Represents an image tag in the datastore.""" __tablename__ = 'image_tags' __table_args__ = (Index('ix_image_tags_image_id', 'image_id'), Index('ix_image_tags_image_id_tag_value', 'image_id', 'value'),) id = Column(Integer, primary_key=True, nullable=False) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('tags')) value = Column(String(255), nullable=False) class ImageLocation(BASE, GlanceBase): """Represents an image location in the datastore.""" __tablename__ = 'image_locations' __table_args__ = (Index('ix_image_locations_image_id', 'image_id'), Index('ix_image_locations_deleted', 'deleted'),) id = Column(Integer, primary_key=True, nullable=False) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('locations')) value = Column(Text(), nullable=False) meta_data = Column(JSONEncodedDict(), default={}) status = Column(String(30), server_default='active', nullable=False) class ImageMember(BASE, GlanceBase): """Represents an image members in the datastore.""" __tablename__ = 'image_members' unique_constraint_key_name = 'image_members_image_id_member_deleted_at_key' __table_args__ = (Index('ix_image_members_deleted', 'deleted'), Index('ix_image_members_image_id', 'image_id'), Index('ix_image_members_image_id_member', 'image_id', 'member'), UniqueConstraint('image_id', 'member', 'deleted_at', name=unique_constraint_key_name),) id = Column(Integer, primary_key=True) image_id = Column(String(36), ForeignKey('images.id'), nullable=False) image = relationship(Image, backref=backref('members')) member = Column(String(255), nullable=False) can_share = Column(Boolean, nullable=False, default=False) status = Column(String(20), nullable=False, 
default="pending", server_default='pending') class Task(BASE, GlanceBase): """Represents an task in the datastore""" __tablename__ = 'tasks' __table_args__ = (Index('ix_tasks_type', 'type'), Index('ix_tasks_status', 'status'), Index('ix_tasks_owner', 'owner'), Index('ix_tasks_deleted', 'deleted'), Index('ix_tasks_updated_at', 'updated_at')) id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) type = Column(String(30), nullable=False) status = Column(String(30), nullable=False) owner = Column(String(255), nullable=False) expires_at = Column(DateTime, nullable=True) class TaskInfo(BASE, models.ModelBase): """Represents task info in the datastore""" __tablename__ = 'task_info' task_id = Column(String(36), ForeignKey('tasks.id'), primary_key=True, nullable=False) task = relationship(Task, backref=backref('info', uselist=False)) # NOTE(nikhil): input and result are stored as text in the DB. # SQLAlchemy marshals the data to/from JSON using custom type # JSONEncodedDict. It uses simplejson underneath. input = Column(JSONEncodedDict()) result = Column(JSONEncodedDict()) message = Column(Text) def register_models(engine): """Create database tables for all models with the given engine.""" models = (Image, ImageProperty, ImageMember) for model in models: model.metadata.create_all(engine) def unregister_models(engine): """Drop database tables for all models with the given engine.""" models = (Image, ImageProperty) for model in models: model.metadata.drop_all(engine) glance-12.0.0/glance/db/sqlalchemy/metadef_api/0000775000567000056710000000000012701407204022377 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/metadef_api/property.py0000664000567000056710000001370212701407047024645 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api from glance.db.sqlalchemy.metadef_api import utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _get(context, property_id, session): try: query = session.query(models.MetadefProperty).filter_by(id=property_id) property_rec = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition property not found for id=%s") % property_id) LOG.warn(msg) raise exc.MetadefPropertyNotFound(msg) return property_rec def _get_by_name(context, namespace_name, name, session): """get a property; raise if ns not found/visible or property not found""" namespace = namespace_api.get(context, namespace_name, session) try: query = session.query(models.MetadefProperty).filter_by( name=name, namespace_id=namespace['id']) property_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata definition property with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefPropertyNotFound(property_name=name, namespace_name=namespace_name) return property_rec def get(context, namespace_name, name, session): """get a property; raise if ns not found/visible or property not found""" property_rec = _get_by_name(context, namespace_name, name, session) return 
property_rec.to_dict() def get_all(context, namespace_name, session): namespace = namespace_api.get(context, namespace_name, session) query = session.query(models.MetadefProperty).filter_by( namespace_id=namespace['id']) properties = query.all() properties_list = [] for prop in properties: properties_list.append(prop.to_dict()) return properties_list def create(context, namespace_name, values, session): namespace = namespace_api.get(context, namespace_name, session) values.update({'namespace_id': namespace['id']}) property_rec = models.MetadefProperty() metadef_utils.drop_protected_attrs(models.MetadefProperty, values) property_rec.update(values.copy()) try: property_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create metadata definition property. A property" " with name=%(name)s already exists in" " namespace=%(namespace_name)s.", {'name': property_rec.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateProperty( property_name=property_rec.name, namespace_name=namespace_name) return property_rec.to_dict() def update(context, namespace_name, property_id, values, session): """Update a property, raise if ns not found/visible or duplicate result""" namespace_api.get(context, namespace_name, session) property_rec = _get(context, property_id, session) metadef_utils.drop_protected_attrs(models.MetadefProperty, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: property_rec.update(values.copy()) property_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition property with the same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': property_rec.name, 'namespace_name': namespace_name}) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition property with the same name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': property_rec.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateProperty(emsg) return property_rec.to_dict() def delete(context, namespace_name, property_name, session): property_rec = _get_by_name( context, namespace_name, property_name, session) if property_rec: session.delete(property_rec) session.flush() return property_rec.to_dict() def delete_namespace_content(context, namespace_id, session): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefProperty).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, namespace_name, session): namespace = namespace_api.get(context, namespace_name, session) return delete_namespace_content(context, namespace['id'], session) def count(context, namespace_name, session): """Get the count of properties for a namespace, raise if ns not found""" namespace = namespace_api.get(context, namespace_name, session) query = session.query(func.count(models.MetadefProperty.id)).filter_by( namespace_id=namespace['id']) return query.scalar() glance-12.0.0/glance/db/sqlalchemy/metadef_api/object.py0000664000567000056710000001314312701407047024226 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _get(context, object_id, session): try: query = session.query(models.MetadefObject).filter_by(id=object_id) metadef_object = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition object not found for id=%s") % object_id) LOG.warn(msg) raise exc.MetadefObjectNotFound(msg) return metadef_object def _get_by_name(context, namespace_name, name, session): namespace = namespace_api.get(context, namespace_name, session) try: query = session.query(models.MetadefObject).filter_by( name=name, namespace_id=namespace['id']) metadef_object = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata definition object with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefObjectNotFound(object_name=name, namespace_name=namespace_name) return metadef_object def get_all(context, namespace_name, session): namespace = namespace_api.get(context, namespace_name, session) query = session.query(models.MetadefObject).filter_by( namespace_id=namespace['id']) md_objects = query.all() md_objects_list = [] for obj in md_objects: md_objects_list.append(obj.to_dict()) return md_objects_list def create(context, namespace_name, values, session): namespace = namespace_api.get(context, namespace_name, session) values.update({'namespace_id': namespace['id']}) md_object = models.MetadefObject() metadef_utils.drop_protected_attrs(models.MetadefObject, values) 
md_object.update(values.copy()) try: md_object.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("A metadata definition object with name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': md_object.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateObject( object_name=md_object.name, namespace_name=namespace_name) return md_object.to_dict() def get(context, namespace_name, name, session): md_object = _get_by_name(context, namespace_name, name, session) return md_object.to_dict() def update(context, namespace_name, object_id, values, session): """Update an object, raise if ns not found/visible or duplicate result""" namespace_api.get(context, namespace_name, session) md_object = _get(context, object_id, session) metadef_utils.drop_protected_attrs(models.MetadefObject, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: md_object.update(values.copy()) md_object.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition object with same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': md_object.name, 'namespace_name': namespace_name}) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition object with the same name=%(name)s" " in namespace=%(namespace_name)s.") % {'name': md_object.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateObject(emsg) return md_object.to_dict() def delete(context, namespace_name, object_name, session): namespace_api.get(context, namespace_name, session) md_object = _get_by_name(context, namespace_name, object_name, session) session.delete(md_object) session.flush() return md_object.to_dict() def delete_namespace_content(context, namespace_id, session): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefObject).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, namespace_name, session): namespace = namespace_api.get(context, namespace_name, session) return delete_namespace_content(context, namespace['id'], session) def count(context, namespace_name, session): """Get the count of objects for a namespace, raise if ns not found""" namespace = namespace_api.get(context, namespace_name, session) query = session.query(func.count(models.MetadefObject.id)).filter_by( namespace_id=namespace['id']) return query.scalar() glance-12.0.0/glance/db/sqlalchemy/metadef_api/utils.py0000664000567000056710000000163112701407047024117 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. def drop_protected_attrs(model_class, values): """ Removed protected attributes from values dictionary using the models __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] glance-12.0.0/glance/db/sqlalchemy/metadef_api/__init__.py0000664000567000056710000000000012701407047024503 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/db/sqlalchemy/metadef_api/tag.py0000664000567000056710000001671612701407047023544 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import exception as db_exc from oslo_db.sqlalchemy.utils import paginate_query from oslo_log import log as logging from sqlalchemy import func import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _LW LOG = logging.getLogger(__name__) def _get(context, id, session): try: query = (session.query(models.MetadefTag).filter_by(id=id)) metadef_tag = query.one() except sa_orm.exc.NoResultFound: msg = (_LW("Metadata tag not found for id %s") % id) LOG.warn(msg) raise exc.MetadefTagNotFound(message=msg) return metadef_tag def _get_by_name(context, namespace_name, name, session): namespace = namespace_api.get(context, namespace_name, session) try: query = (session.query(models.MetadefTag).filter_by( name=name, namespace_id=namespace['id'])) metadef_tag = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The metadata tag with name=%(name)s" " was not found in namespace=%(namespace_name)s.", {'name': name, 'namespace_name': namespace_name}) raise exc.MetadefTagNotFound(name=name, namespace_name=namespace_name) return metadef_tag def get_all(context, namespace_name, session, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """Get all tags that match zero or more filters. :param filters: dict of filter keys and values. 
:param marker: tag id after which to start page :param limit: maximum number of namespaces to return :param sort_key: namespace attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) """ namespace = namespace_api.get(context, namespace_name, session) query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace['id'])) marker_tag = None if marker is not None: marker_tag = _get(context, marker, session) sort_keys = ['created_at', 'id'] sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys query = paginate_query(query=query, model=models.MetadefTag, limit=limit, sort_keys=sort_keys, marker=marker_tag, sort_dir=sort_dir) metadef_tag = query.all() metadef_tag_list = [] for tag in metadef_tag: metadef_tag_list.append(tag.to_dict()) return metadef_tag_list def create(context, namespace_name, values, session): namespace = namespace_api.get(context, namespace_name, session) values.update({'namespace_id': namespace['id']}) metadef_tag = models.MetadefTag() metadef_utils.drop_protected_attrs(models.MetadefTag, values) metadef_tag.update(values.copy()) try: metadef_tag.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("A metadata tag name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': metadef_tag.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=metadef_tag.name, namespace_name=namespace_name) return metadef_tag.to_dict() def create_tags(context, namespace_name, tag_list, session): metadef_tags_list = [] if tag_list: namespace = namespace_api.get(context, namespace_name, session) try: with session.begin(): query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace['id'])) query.delete(synchronize_session='fetch') for value in tag_list: value.update({'namespace_id': namespace['id']}) metadef_utils.drop_protected_attrs( models.MetadefTag, value) metadef_tag = models.MetadefTag() 
metadef_tag.update(value.copy()) metadef_tag.save(session=session) metadef_tags_list.append(metadef_tag.to_dict()) except db_exc.DBDuplicateEntry: LOG.debug("A metadata tag name=%(name)s" " in namespace=%(namespace_name)s already exists.", {'name': metadef_tag.name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=metadef_tag.name, namespace_name=namespace_name) return metadef_tags_list def get(context, namespace_name, name, session): metadef_tag = _get_by_name(context, namespace_name, name, session) return metadef_tag.to_dict() def update(context, namespace_name, id, values, session): """Update an tag, raise if ns not found/visible or duplicate result""" namespace_api.get(context, namespace_name, session) metadata_tag = _get(context, id, session) metadef_utils.drop_protected_attrs(models.MetadefTag, values) # values['updated_at'] = timeutils.utcnow() - done by TS mixin try: metadata_tag.update(values.copy()) metadata_tag.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. 
It would result in a duplicate" " metadata tag with same name=%(name)s" " in namespace=%(namespace_name)s.", {'name': values['name'], 'namespace_name': namespace_name}) raise exc.MetadefDuplicateTag( name=values['name'], namespace_name=namespace_name) return metadata_tag.to_dict() def delete(context, namespace_name, name, session): namespace_api.get(context, namespace_name, session) md_tag = _get_by_name(context, namespace_name, name, session) session.delete(md_tag) session.flush() return md_tag.to_dict() def delete_namespace_content(context, namespace_id, session): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = (session.query(models.MetadefTag).filter_by( namespace_id=namespace_id)) count = query.delete(synchronize_session='fetch') return count def delete_by_namespace_name(context, namespace_name, session): namespace = namespace_api.get(context, namespace_name, session) return delete_namespace_content(context, namespace['id'], session) def count(context, namespace_name, session): """Get the count of objects for a namespace, raise if ns not found""" namespace = namespace_api.get(context, namespace_name, session) query = (session.query(func.count(models.MetadefTag.id)).filter_by( namespace_id=namespace['id'])) return query.scalar() glance-12.0.0/glance/db/sqlalchemy/metadef_api/resource_type_association.py0000664000567000056710000002012512701407047030242 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging import sqlalchemy.orm as sa_orm from glance.common import exception as exc from glance.db.sqlalchemy.metadef_api import namespace as namespace_api from glance.db.sqlalchemy.metadef_api import resource_type as resource_type_api from glance.db.sqlalchemy.metadef_api import utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models LOG = logging.getLogger(__name__) def _to_db_dict(namespace_id, resource_type_id, model_dict): """transform a model dict to a metadef_namespace_resource_type dict""" db_dict = {'namespace_id': namespace_id, 'resource_type_id': resource_type_id, 'properties_target': model_dict['properties_target'], 'prefix': model_dict['prefix']} return db_dict def _to_model_dict(resource_type_name, ns_res_type_dict): """transform a metadef_namespace_resource_type dict to a model dict""" model_dict = {'name': resource_type_name, 'properties_target': ns_res_type_dict['properties_target'], 'prefix': ns_res_type_dict['prefix'], 'created_at': ns_res_type_dict['created_at'], 'updated_at': ns_res_type_dict['updated_at']} return model_dict def _set_model_dict(resource_type_name, properties_target, prefix, created_at, updated_at): """return a model dict set with the passed in key values""" model_dict = {'name': resource_type_name, 'properties_target': properties_target, 'prefix': prefix, 'created_at': created_at, 'updated_at': updated_at} return model_dict def _get(context, namespace_name, resource_type_name, namespace_id, resource_type_id, session): """Get a namespace resource_type association""" # visibility check assumed done in calling routine via namespace_get try: query = session.query(models.MetadefNamespaceResourceType).filter_by( namespace_id=namespace_id, resource_type_id=resource_type_id) db_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("The 
metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace_name=%(namespace_name)s was not found.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exc.MetadefResourceTypeAssociationNotFound( resource_type_name=resource_type_name, namespace_name=namespace_name) return db_rec def _create_association( context, namespace_name, resource_type_name, values, session): """Create an association, raise if it already exists.""" namespace_resource_type_rec = models.MetadefNamespaceResourceType() metadef_utils.drop_protected_attrs( models.MetadefNamespaceResourceType, values) # values['updated_at'] = timeutils.utcnow() # TS mixin should do this namespace_resource_type_rec.update(values.copy()) try: namespace_resource_type_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("The metadata definition resource-type association of" " resource_type=%(resource_type_name)s to" " namespace=%(namespace_name)s, already exists.", {'resource_type_name': resource_type_name, 'namespace_name': namespace_name}) raise exc.MetadefDuplicateResourceTypeAssociation( resource_type_name=resource_type_name, namespace_name=namespace_name) return namespace_resource_type_rec.to_dict() def _delete(context, namespace_name, resource_type_name, namespace_id, resource_type_id, session): """Delete a resource type association or raise if not found.""" db_rec = _get(context, namespace_name, resource_type_name, namespace_id, resource_type_id, session) session.delete(db_rec) session.flush() return db_rec.to_dict() def get(context, namespace_name, resource_type_name, session): """Get a resource_type associations; raise if not found""" namespace = namespace_api.get( context, namespace_name, session) resource_type = resource_type_api.get( context, resource_type_name, session) found = _get(context, namespace_name, resource_type_name, namespace['id'], resource_type['id'], session) return _to_model_dict(resource_type_name, 
found) def get_all_by_namespace(context, namespace_name, session): """List resource_type associations by namespace, raise if not found""" # namespace get raises an exception if not visible namespace = namespace_api.get( context, namespace_name, session) db_recs = ( session.query(models.MetadefResourceType) .join(models.MetadefResourceType.associations) .filter_by(namespace_id=namespace['id']) .values(models.MetadefResourceType.name, models.MetadefNamespaceResourceType.properties_target, models.MetadefNamespaceResourceType.prefix, models.MetadefNamespaceResourceType.created_at, models.MetadefNamespaceResourceType.updated_at)) model_dict_list = [] for name, properties_target, prefix, created_at, updated_at in db_recs: model_dict_list.append( _set_model_dict (name, properties_target, prefix, created_at, updated_at) ) return model_dict_list def create(context, namespace_name, values, session): """Create an association, raise if already exists or ns not found.""" namespace = namespace_api.get( context, namespace_name, session) # if the resource_type does not exist, create it resource_type_name = values['name'] metadef_utils.drop_protected_attrs( models.MetadefNamespaceResourceType, values) try: resource_type = resource_type_api.get( context, resource_type_name, session) except exc.NotFound: resource_type = None LOG.debug("Creating resource-type %s", resource_type_name) if resource_type is None: resource_type_dict = {'name': resource_type_name, 'protected': False} resource_type = resource_type_api.create( context, resource_type_dict, session) # Create the association record, set the field values ns_resource_type_dict = _to_db_dict( namespace['id'], resource_type['id'], values) new_rec = _create_association(context, namespace_name, resource_type_name, ns_resource_type_dict, session) return _to_model_dict(resource_type_name, new_rec) def delete(context, namespace_name, resource_type_name, session): """Delete an association or raise if not found""" namespace = 
namespace_api.get( context, namespace_name, session) resource_type = resource_type_api.get( context, resource_type_name, session) deleted = _delete(context, namespace_name, resource_type_name, namespace['id'], resource_type['id'], session) return _to_model_dict(resource_type_name, deleted) def delete_namespace_content(context, namespace_id, session): """Use this def only if the ns for the id has been verified as visible""" count = 0 query = session.query(models.MetadefNamespaceResourceType).filter_by( namespace_id=namespace_id) count = query.delete(synchronize_session='fetch') return count glance-12.0.0/glance/db/sqlalchemy/metadef_api/namespace.py0000664000567000056710000002524012701407047024715 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import exception as db_exc from oslo_db.sqlalchemy.utils import paginate_query from oslo_log import log as logging import sqlalchemy.exc as sa_exc from sqlalchemy import or_ import sqlalchemy.orm as sa_orm from glance.common import exception as exc import glance.db.sqlalchemy.metadef_api as metadef_api from glance.db.sqlalchemy import models_metadef as models from glance.i18n import _ LOG = logging.getLogger(__name__) def _is_namespace_visible(context, namespace, status=None): """Return True if the namespace is visible in this context.""" # Is admin == visible if context.is_admin: return True # No owner == visible if namespace['owner'] is None: return True # Is public == visible if 'visibility' in namespace: if namespace['visibility'] == 'public': return True # context.owner has a value and is the namespace owner == visible if context.owner is not None: if context.owner == namespace['owner']: return True # Private return False def _select_namespaces_query(context, session): """Build the query to get all namespaces based on the context""" LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s", {'is_admin': context.is_admin, 'owner': context.owner}) # If admin, return everything. query_ns = session.query(models.MetadefNamespace) if context.is_admin: return query_ns else: # If regular user, return only public namespaces. # However, if context.owner has a value, return both # public and private namespaces of the context.owner. 
if context.owner is not None: query = ( query_ns.filter( or_(models.MetadefNamespace.owner == context.owner, models.MetadefNamespace.visibility == 'public'))) else: query = query_ns.filter( models.MetadefNamespace.visibility == 'public') return query def _get(context, namespace_id, session): """Get a namespace by id, raise if not found""" try: query = session.query(models.MetadefNamespace).filter_by( id=namespace_id) namespace_rec = query.one() except sa_orm.exc.NoResultFound: msg = (_("Metadata definition namespace not found for id=%s") % namespace_id) LOG.warn(msg) raise exc.MetadefNamespaceNotFound(msg) # Make sure they are allowed to view it. if not _is_namespace_visible(context, namespace_rec.to_dict()): LOG.debug("Forbidding request, metadata definition namespace=%s" " is not visible.", namespace_rec.namespace) emsg = _("Forbidding request, metadata definition namespace=%s" " is not visible.") % namespace_rec.namespace raise exc.MetadefForbidden(emsg) return namespace_rec def _get_by_name(context, name, session): """Get a namespace by name, raise if not found""" try: query = session.query(models.MetadefNamespace).filter_by( namespace=name) namespace_rec = query.one() except sa_orm.exc.NoResultFound: LOG.debug("Metadata definition namespace=%s was not found.", name) raise exc.MetadefNamespaceNotFound(namespace_name=name) # Make sure they are allowed to view it. if not _is_namespace_visible(context, namespace_rec.to_dict()): LOG.debug("Forbidding request, metadata definition namespace=%s" " is not visible.", name) emsg = _("Forbidding request, metadata definition namespace=%s" " is not visible.") % name raise exc.MetadefForbidden(emsg) return namespace_rec def _get_all(context, session, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc'): """Get all namespaces that match zero or more filters. :param filters: dict of filter keys and values. 
:param marker: namespace id after which to start page :param limit: maximum number of namespaces to return :param sort_key: namespace attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) """ filters = filters or {} query = _select_namespaces_query(context, session) # if visibility filter, apply it to the context based query visibility = filters.pop('visibility', None) if visibility is not None: query = query.filter(models.MetadefNamespace.visibility == visibility) # if id_list filter, apply it to the context based query id_list = filters.pop('id_list', None) if id_list is not None: query = query.filter(models.MetadefNamespace.id.in_(id_list)) marker_namespace = None if marker is not None: marker_namespace = _get(context, marker, session) sort_keys = ['created_at', 'id'] sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys query = paginate_query(query=query, model=models.MetadefNamespace, limit=limit, sort_keys=sort_keys, marker=marker_namespace, sort_dir=sort_dir) return query.all() def _get_all_by_resource_types(context, session, filters, marker=None, limit=None, sort_key=None, sort_dir=None): """get all visible namespaces for the specified resource_types""" resource_types = filters['resource_types'] resource_type_list = resource_types.split(',') db_recs = ( session.query(models.MetadefResourceType) .join(models.MetadefResourceType.associations) .filter(models.MetadefResourceType.name.in_(resource_type_list)) .values(models.MetadefResourceType.name, models.MetadefNamespaceResourceType.namespace_id) ) namespace_id_list = [] for name, namespace_id in db_recs: namespace_id_list.append(namespace_id) if len(namespace_id_list) is 0: return [] filters2 = filters filters2.update({'id_list': namespace_id_list}) return _get_all(context, session, filters2, marker, limit, sort_key, sort_dir) def get_all(context, session, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None): 
"""List all visible namespaces""" namespaces = [] filters = filters or {} if 'resource_types' in filters: namespaces = _get_all_by_resource_types( context, session, filters, marker, limit, sort_key, sort_dir) else: namespaces = _get_all( context, session, filters, marker, limit, sort_key, sort_dir) return map(lambda ns: ns.to_dict(), namespaces) def get(context, name, session): """Get a namespace by name, raise if not found""" namespace_rec = _get_by_name(context, name, session) return namespace_rec.to_dict() def create(context, values, session): """Create a namespace, raise if namespace already exists.""" namespace_name = values['namespace'] namespace = models.MetadefNamespace() metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) namespace.update(values.copy()) try: namespace.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create the metadata definition namespace." " Namespace=%s already exists.", namespace_name) raise exc.MetadefDuplicateNamespace( namespace_name=namespace_name) return namespace.to_dict() def update(context, namespace_id, values, session): """Update a namespace, raise if not found/visible or duplicate result""" namespace_rec = _get(context, namespace_id, session) metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) try: namespace_rec.update(values.copy()) namespace_rec.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Invalid update. It would result in a duplicate" " metadata definition namespace with the same name of %s", values['namespace']) emsg = (_("Invalid update. 
It would result in a duplicate" " metadata definition namespace with the same name of %s") % values['namespace']) raise exc.MetadefDuplicateNamespace(emsg) return namespace_rec.to_dict() def delete(context, name, session): """Raise if not found, has references or not visible""" namespace_rec = _get_by_name(context, name, session) try: session.delete(namespace_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Metadata definition namespace=%s not deleted. " "Other records still refer to it.", name) raise exc.MetadefIntegrityError( record_type='namespace', record_name=name) else: raise e return namespace_rec.to_dict() def delete_cascade(context, name, session): """Raise if not found, has references or not visible""" namespace_rec = _get_by_name(context, name, session) with session.begin(): try: metadef_api.tag.delete_namespace_content( context, namespace_rec.id, session) metadef_api.object.delete_namespace_content( context, namespace_rec.id, session) metadef_api.property.delete_namespace_content( context, namespace_rec.id, session) metadef_api.resource_type_association.delete_namespace_content( context, namespace_rec.id, session) session.delete(namespace_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Metadata definition namespace=%s not deleted. " "Other records still refer to it.", name) raise exc.MetadefIntegrityError( record_type='namespace', record_name=name) else: raise e return namespace_rec.to_dict() glance-12.0.0/glance/db/sqlalchemy/metadef_api/resource_type.py0000664000567000056710000000706512701407047025656 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging import sqlalchemy.exc as sa_exc import sqlalchemy.orm as sa_orm from glance.common import exception as exc import glance.db.sqlalchemy.metadef_api.utils as metadef_utils from glance.db.sqlalchemy import models_metadef as models LOG = logging.getLogger(__name__) def get(context, name, session): """Get a resource type, raise if not found""" try: query = session.query(models.MetadefResourceType).filter_by(name=name) resource_type = query.one() except sa_orm.exc.NoResultFound: LOG.debug("No metadata definition resource-type found with name %s", name) raise exc.MetadefResourceTypeNotFound(resource_type_name=name) return resource_type.to_dict() def get_all(context, session): """Get a list of all resource types""" query = session.query(models.MetadefResourceType) resource_types = query.all() resource_types_list = [] for rt in resource_types: resource_types_list.append(rt.to_dict()) return resource_types_list def create(context, values, session): """Create a resource_type, raise if it already exists.""" resource_type = models.MetadefResourceType() metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) resource_type.update(values.copy()) try: resource_type.save(session=session) except db_exc.DBDuplicateEntry: LOG.debug("Can not create the metadata definition resource-type. 
" "A resource-type with name=%s already exists.", resource_type.name) raise exc.MetadefDuplicateResourceType( resource_type_name=resource_type.name) return resource_type.to_dict() def update(context, values, session): """Update a resource type, raise if not found""" name = values['name'] metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) db_rec = get(context, name, session) db_rec.update(values.copy()) db_rec.save(session=session) return db_rec.to_dict() def delete(context, name, session): """Delete a resource type or raise if not found or is protected""" db_rec = get(context, name, session) if db_rec.protected is True: LOG.debug("Delete forbidden. Metadata definition resource-type %s is a" " seeded-system type and can not be deleted.", name) raise exc.ProtectedMetadefResourceTypeSystemDelete( resource_type_name=name) try: session.delete(db_rec) session.flush() except db_exc.DBError as e: if isinstance(e.inner_exception, sa_exc.IntegrityError): LOG.debug("Could not delete Metadata definition resource-type %s" ". It still has content", name) raise exc.MetadefIntegrityError( record_type='resource-type', record_name=name) else: raise e return db_rec.to_dict() glance-12.0.0/glance/db/sqlalchemy/api.py0000664000567000056710000020711712701407047021301 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # Copyright 2013 IBM Corp. # Copyright 2015 Mirantis, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access.""" import datetime import threading from oslo_config import cfg from oslo_db import exception as db_exception from oslo_db.sqlalchemy import session from oslo_log import log as logging import osprofiler.sqlalchemy from retrying import retry import six # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range import sqlalchemy from sqlalchemy.ext.compiler import compiles from sqlalchemy import MetaData, Table import sqlalchemy.orm as sa_orm from sqlalchemy import sql import sqlalchemy.sql as sa_sql from glance.common import exception from glance.common import timeutils from glance.common import utils from glance.db.sqlalchemy import glare from glance.db.sqlalchemy.metadef_api import (resource_type as metadef_resource_type_api) from glance.db.sqlalchemy.metadef_api import (resource_type_association as metadef_association_api) from glance.db.sqlalchemy.metadef_api import namespace as metadef_namespace_api from glance.db.sqlalchemy.metadef_api import object as metadef_object_api from glance.db.sqlalchemy.metadef_api import property as metadef_property_api from glance.db.sqlalchemy.metadef_api import tag as metadef_tag_api from glance.db.sqlalchemy import models from glance import glare as ga from glance.i18n import _, _LW, _LE, _LI BASE = models.BASE sa_logger = None LOG = logging.getLogger(__name__) STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete', 'deleted', 'deactivated'] CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") _FACADE = None _LOCK = 
threading.Lock() def _retry_on_deadlock(exc): """Decorator to retry a DB API call if Deadlock was received.""" if isinstance(exc, db_exception.DBDeadlock): LOG.warn(_LW("Deadlock detected. Retrying...")) return True return False def _create_facade_lazily(): global _LOCK, _FACADE if _FACADE is None: with _LOCK: if _FACADE is None: _FACADE = session.EngineFacade.from_config(CONF) if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: osprofiler.sqlalchemy.add_tracing(sqlalchemy, _FACADE.get_engine(), "db") return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(autocommit=True, expire_on_commit=False): facade = _create_facade_lazily() return facade.get_session(autocommit=autocommit, expire_on_commit=expire_on_commit) def clear_db_env(): """ Unset global configuration variables for database. """ global _FACADE _FACADE = None def _check_mutate_authorization(context, image_ref): if not is_image_mutable(context, image_ref): LOG.warn(_LW("Attempted to modify image user did not own.")) msg = _("You do not own this image") if image_ref.is_public: exc_class = exception.ForbiddenPublicImage else: exc_class = exception.Forbidden raise exc_class(msg) def image_create(context, values): """Create an image from the values dictionary.""" return _image_update(context, values, None, purge_props=False) def image_update(context, image_id, values, purge_props=False, from_state=None): """ Set the given properties on an image and update it. :raises: ImageNotFound if image does not exist. 
""" return _image_update(context, values, image_id, purge_props, from_state=from_state) @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) def image_destroy(context, image_id): """Destroy the image or raise if it does not exist.""" session = get_session() with session.begin(): image_ref = _image_get(context, image_id, session=session) # Perform authorization check _check_mutate_authorization(context, image_ref) image_ref.delete(session=session) delete_time = image_ref.deleted_at _image_locations_delete_all(context, image_id, delete_time, session) _image_property_delete_all(context, image_id, delete_time, session) _image_member_delete_all(context, image_id, delete_time, session) _image_tag_delete_all(context, image_id, delete_time, session) return _normalize_locations(context, image_ref) def _normalize_locations(context, image, force_show_deleted=False): """ Generate suitable dictionary list for locations field of image. We don't need to set other data fields of location record which return from image query. 
""" if image['status'] == 'deactivated' and not context.is_admin: # Locations are not returned for a deactivated image for non-admin user image['locations'] = [] return image if force_show_deleted: locations = image['locations'] else: locations = filter(lambda x: not x.deleted, image['locations']) image['locations'] = [{'id': loc['id'], 'url': loc['value'], 'metadata': loc['meta_data'], 'status': loc['status']} for loc in locations] return image def _normalize_tags(image): undeleted_tags = filter(lambda x: not x.deleted, image['tags']) image['tags'] = [tag['value'] for tag in undeleted_tags] return image def image_get(context, image_id, session=None, force_show_deleted=False): image = _image_get(context, image_id, session=session, force_show_deleted=force_show_deleted) image = _normalize_locations(context, image.to_dict(), force_show_deleted=force_show_deleted) return image def _check_image_id(image_id): """ check if the given image id is valid before executing operations. For now, we only check its length. The original purpose of this method is wrapping the different behaviors between MySql and DB2 when the image id length is longer than the defined length in database model. 
:param image_id: The id of the image we want to check :returns: Raise NoFound exception if given image id is invalid """ if (image_id and len(image_id) > models.Image.id.property.columns[0].type.length): raise exception.ImageNotFound() def _image_get(context, image_id, session=None, force_show_deleted=False): """Get an image or raise if it does not exist.""" _check_image_id(image_id) session = session or get_session() try: query = session.query(models.Image).options( sa_orm.joinedload(models.Image.properties)).options( sa_orm.joinedload( models.Image.locations)).filter_by(id=image_id) # filter out deleted images if context disallows it if not force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) image = query.one() except sa_orm.exc.NoResultFound: msg = "No image found with ID %s" % image_id LOG.debug(msg) raise exception.ImageNotFound(msg) # Make sure they can look at it if not is_image_visible(context, image): msg = "Forbidding request, image %s not visible" % image_id LOG.debug(msg) raise exception.Forbidden(msg) return image def is_image_mutable(context, image): """Return True if the image is mutable in this context.""" # Is admin == image mutable if context.is_admin: return True # No owner == image not mutable if image['owner'] is None or context.owner is None: return False # Image only mutable by its owner return image['owner'] == context.owner def is_image_visible(context, image, status=None): """Return True if the image is visible in this context.""" # Is admin == image visible if context.is_admin: return True # No owner == image visible if image['owner'] is None: return True # Image is_public == image visible if image['is_public']: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == image['owner']: return True # Figure out if this image is shared with that tenant members = image_member_find(context, image_id=image['id'], member=context.owner, status=status) if 
members: return True # Private image return False def _get_default_column_value(column_type): """Return the default value of the columns from DB table In postgreDB case, if no right default values are being set, an psycopg2.DataError will be thrown. """ type_schema = { 'datetime': None, 'big_integer': 0, 'integer': 0, 'string': '' } if isinstance(column_type, sa_sql.type_api.Variant): return _get_default_column_value(column_type.impl) return type_schema[column_type.__visit_name__] def _paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we returns the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :rtype: sqlalchemy.orm.query.Query :returns: The query with sorting/pagination added. 
""" if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?')) assert(not (sort_dir and sort_dirs)) # nosec # nosec: This function runs safely if the assertion fails. # Default the sort direction to ascending if sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir] * len(sort_keys) assert(len(sort_dirs) == len(sort_keys)) # nosec # nosec: This function runs safely if the assertion fails. if len(sort_dirs) < len(sort_keys): sort_dirs += [sort_dir] * (len(sort_keys) - len(sort_dirs)) # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidSortKey() query = query.order_by(sort_dir_func(sort_key_attr)) default = '' # Default to an empty string if NULL # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) if v is None: v = default marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(len(sort_keys)): crit_attrs = [] for j in range(i): model_attr = getattr(model, sort_keys[j]) default = _get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case([(model_attr != None, model_attr), ], else_=default) crit_attrs.append((attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) default = _get_default_column_value( model_attr.property.columns[0].type) attr = sa_sql.expression.case([(model_attr != None, model_attr), ], else_=default) if sort_dirs[i] == 'desc': crit_attrs.append((attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((attr > marker_values[i])) else: raise 
ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sa_sql.and_(*crit_attrs) criteria_list.append(criteria) f = sa_sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) return query def _make_conditions_from_filters(filters, is_public=None): # NOTE(venkatesh) make copy of the filters are to be altered in this # method. filters = filters.copy() image_conditions = [] prop_conditions = [] tag_conditions = [] if is_public is not None: image_conditions.append(models.Image.is_public == is_public) if 'checksum' in filters: checksum = filters.pop('checksum') image_conditions.append(models.Image.checksum == checksum) if 'is_public' in filters: key = 'is_public' value = filters.pop('is_public') prop_filters = _make_image_property_condition(key=key, value=value) prop_conditions.append(prop_filters) for (k, v) in filters.pop('properties', {}).items(): prop_filters = _make_image_property_condition(key=k, value=v) prop_conditions.append(prop_filters) if 'changes-since' in filters: # normalize timestamp to UTC, as sqlalchemy doesn't appear to # respect timezone offsets changes_since = timeutils.normalize_time(filters.pop('changes-since')) image_conditions.append(models.Image.updated_at > changes_since) if 'deleted' in filters: deleted_filter = filters.pop('deleted') image_conditions.append(models.Image.deleted == deleted_filter) # TODO(bcwaldon): handle this logic in registry server if not deleted_filter: image_statuses = [s for s in STATUSES if s != 'killed'] image_conditions.append(models.Image.status.in_(image_statuses)) if 'tags' in filters: tags = filters.pop('tags') for tag in tags: tag_filters = [models.ImageTag.deleted == False] tag_filters.extend([models.ImageTag.value == tag]) tag_conditions.append(tag_filters) filters = {k: v for k, v in filters.items() if v is not None} # need to copy items because filters is modified in the loop body # (filters.pop(k)) keys = list(filters.keys()) for k in keys: key 
= k if k.endswith('_min') or k.endswith('_max'): key = key[0:-4] try: v = int(filters.pop(k)) except ValueError: msg = _("Unable to filter on a range " "with a non-numeric value.") raise exception.InvalidFilterRangeValue(msg) if k.endswith('_min'): image_conditions.append(getattr(models.Image, key) >= v) if k.endswith('_max'): image_conditions.append(getattr(models.Image, key) <= v) elif k in ['created_at', 'updated_at']: attr_value = getattr(models.Image, key) operator, isotime = utils.split_filter_op(filters.pop(k)) try: parsed_time = timeutils.parse_isotime(isotime) threshold = timeutils.normalize_time(parsed_time) except ValueError: msg = (_("Bad \"%s\" query filter format. " "Use ISO 8601 DateTime notation.") % k) raise exception.InvalidParameterValue(msg) comparison = utils.evaluate_filter_op(attr_value, operator, threshold) image_conditions.append(comparison) elif k in ['name', 'id', 'status', 'container_format', 'disk_format']: attr_value = getattr(models.Image, key) operator, list_value = utils.split_filter_op(filters.pop(k)) if operator == 'in': threshold = utils.split_filter_value_for_quotes(list_value) comparison = attr_value.in_(threshold) image_conditions.append(comparison) elif operator == 'eq': image_conditions.append(attr_value == list_value) else: msg = (_("Unable to filter by unknown operator '%s'.") % operator) raise exception.InvalidFilterOperatorValue(msg) for (k, value) in filters.items(): if hasattr(models.Image, k): image_conditions.append(getattr(models.Image, k) == value) else: prop_filters = _make_image_property_condition(key=k, value=value) prop_conditions.append(prop_filters) return image_conditions, prop_conditions, tag_conditions def _make_image_property_condition(key, value): prop_filters = [models.ImageProperty.deleted == False] prop_filters.extend([models.ImageProperty.name == key]) prop_filters.extend([models.ImageProperty.value == value]) return prop_filters def _select_images_query(context, image_conditions, admin_as_user, 
member_status, visibility): session = get_session() img_conditional_clause = sa_sql.and_(*image_conditions) regular_user = (not context.is_admin) or admin_as_user query_member = session.query(models.Image).join( models.Image.members).filter(img_conditional_clause) if regular_user: member_filters = [models.ImageMember.deleted == False] if context.owner is not None: member_filters.extend([models.ImageMember.member == context.owner]) if member_status != 'all': member_filters.extend([ models.ImageMember.status == member_status]) query_member = query_member.filter(sa_sql.and_(*member_filters)) # NOTE(venkatesh) if the 'visibility' is set to 'shared', we just # query the image members table. No union is required. if visibility is not None and visibility == 'shared': return query_member query_image = session.query(models.Image).filter(img_conditional_clause) if regular_user: query_image = query_image.filter(models.Image.is_public == True) query_image_owner = None if context.owner is not None: query_image_owner = session.query(models.Image).filter( models.Image.owner == context.owner).filter( img_conditional_clause) if query_image_owner is not None: query = query_image.union(query_image_owner, query_member) else: query = query_image.union(query_member) return query else: # Admin user return query_image def image_get_all(context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False): """ Get all images that match zero or more filters. :param filters: dict of filter keys and values. 
If a 'properties' key is present, it is treated as a dict of key/value filters on the image properties attribute :param marker: image id after which to start page :param limit: maximum number of images to return :param sort_key: list of image attributes by which results should be sorted :param sort_dir: directions in which results should be sorted (asc, desc) :param member_status: only return shared images that have this membership status :param is_public: If true, return only public images. If false, return only private and shared images. :param admin_as_user: For backwards compatibility. If true, then return to an admin the equivalent set of images which it would see if it was a regular user :param return_tag: To indicates whether image entry in result includes it relevant tag entries. This could improve upper-layer query performance, to prevent using separated calls """ sort_key = ['created_at'] if not sort_key else sort_key default_sort_dir = 'desc' if not sort_dir: sort_dir = [default_sort_dir] * len(sort_key) elif len(sort_dir) == 1: default_sort_dir = sort_dir[0] sort_dir *= len(sort_key) filters = filters or {} visibility = filters.pop('visibility', None) showing_deleted = 'changes-since' in filters or filters.get('deleted', False) img_cond, prop_cond, tag_cond = _make_conditions_from_filters( filters, is_public) query = _select_images_query(context, img_cond, admin_as_user, member_status, visibility) if visibility is not None: if visibility == 'public': query = query.filter(models.Image.is_public == True) elif visibility == 'private': query = query.filter(models.Image.is_public == False) if prop_cond: for prop_condition in prop_cond: query = query.join(models.ImageProperty, aliased=True).filter( sa_sql.and_(*prop_condition)) if tag_cond: for tag_condition in tag_cond: query = query.join(models.ImageTag, aliased=True).filter( sa_sql.and_(*tag_condition)) marker_image = None if marker is not None: marker_image = _image_get(context, marker, 
force_show_deleted=showing_deleted) for key in ['created_at', 'id']: if key not in sort_key: sort_key.append(key) sort_dir.append(default_sort_dir) query = _paginate_query(query, models.Image, limit, sort_key, marker=marker_image, sort_dir=None, sort_dirs=sort_dir) query = query.options(sa_orm.joinedload( models.Image.properties)).options( sa_orm.joinedload(models.Image.locations)) if return_tag: query = query.options(sa_orm.joinedload(models.Image.tags)) images = [] for image in query.all(): image_dict = image.to_dict() image_dict = _normalize_locations(context, image_dict, force_show_deleted=showing_deleted) if return_tag: image_dict = _normalize_tags(image_dict) images.append(image_dict) return images def _drop_protected_attrs(model_class, values): """ Removed protected attributes from values dictionary using the models __protected_attributes__ field. """ for attr in model_class.__protected_attributes__: if attr in values: del values[attr] def _image_get_disk_usage_by_owner(owner, session, image_id=None): query = session.query(models.Image) query = query.filter(models.Image.owner == owner) if image_id is not None: query = query.filter(models.Image.id != image_id) query = query.filter(models.Image.size > 0) query = query.filter(~models.Image.status.in_(['killed', 'deleted'])) images = query.all() total = 0 for i in images: locations = [l for l in i.locations if l['status'] != 'deleted'] total += (i.size * len(locations)) return total def _validate_image(values, mandatory_status=True): """ Validates the incoming data and raises a Invalid exception if anything is out of order. :param values: Mapping of image metadata to check :param mandatory_status: Whether to validate status from values """ if mandatory_status: status = values.get('status') if not status: msg = "Image status is required." raise exception.Invalid(msg) if status not in STATUSES: msg = "Invalid image status '%s' for image." 
% status raise exception.Invalid(msg) # validate integer values to eliminate DBError on save utils.validate_mysql_int(min_disk=values.get('min_disk'), min_ram=values.get('min_ram')) return values def _update_values(image_ref, values): for k in values: if getattr(image_ref, k) != values[k]: setattr(image_ref, k, values[k]) @retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, stop_max_attempt_number=50) @utils.no_4byte_params def _image_update(context, values, image_id, purge_props=False, from_state=None): """ Used internally by image_create and image_update :param context: Request context :param values: A dict of attributes to set :param image_id: If None, create the image, otherwise, find and update it """ # NOTE(jbresnah) values is altered in this so a copy is needed values = values.copy() session = get_session() with session.begin(): # Remove the properties passed in the values mapping. We # handle properties separately from base image attributes, # and leaving properties in the values mapping will cause # a SQLAlchemy model error because SQLAlchemy expects the # properties attribute of an Image model to be a list and # not a dict. 
properties = values.pop('properties', {}) location_data = values.pop('locations', None) new_status = values.get('status', None) if image_id: image_ref = _image_get(context, image_id, session=session) current = image_ref.status # Perform authorization check _check_mutate_authorization(context, image_ref) else: if values.get('size') is not None: values['size'] = int(values['size']) if 'min_ram' in values: values['min_ram'] = int(values['min_ram'] or 0) if 'min_disk' in values: values['min_disk'] = int(values['min_disk'] or 0) values['is_public'] = bool(values.get('is_public', False)) values['protected'] = bool(values.get('protected', False)) image_ref = models.Image() # Need to canonicalize ownership if 'owner' in values and not values['owner']: values['owner'] = None if image_id: # Don't drop created_at if we're passing it in... _drop_protected_attrs(models.Image, values) # NOTE(iccha-sethi): updated_at must be explicitly set in case # only ImageProperty table was modifited values['updated_at'] = timeutils.utcnow() if image_id: query = session.query(models.Image).filter_by(id=image_id) if from_state: query = query.filter_by(status=from_state) mandatory_status = True if new_status else False _validate_image(values, mandatory_status=mandatory_status) # Validate fields for Images table. This is similar to what is done # for the query result update except that we need to do it prior # in this case. values = {key: values[key] for key in values if key in image_ref.to_dict()} updated = query.update(values, synchronize_session='fetch') if not updated: msg = (_('cannot transition from %(current)s to ' '%(next)s in update (wanted ' 'from_state=%(from)s)') % {'current': current, 'next': new_status, 'from': from_state}) raise exception.Conflict(msg) image_ref = _image_get(context, image_id, session=session) else: image_ref.update(values) # Validate the attributes before we go any further. 
From my # investigation, the @validates decorator does not validate # on new records, only on existing records, which is, well, # idiotic. values = _validate_image(image_ref.to_dict()) _update_values(image_ref, values) try: image_ref.save(session=session) except db_exception.DBDuplicateEntry: raise exception.Duplicate("Image ID %s already exists!" % values['id']) _set_properties_for_image(context, image_ref, properties, purge_props, session) if location_data: _image_locations_set(context, image_ref.id, location_data, session=session) return image_get(context, image_ref.id) @utils.no_4byte_params def image_location_add(context, image_id, location, session=None): deleted = location['status'] in ('deleted', 'pending_delete') delete_time = timeutils.utcnow() if deleted else None location_ref = models.ImageLocation(image_id=image_id, value=location['url'], meta_data=location['metadata'], status=location['status'], deleted=deleted, deleted_at=delete_time) session = session or get_session() location_ref.save(session=session) @utils.no_4byte_params def image_location_update(context, image_id, location, session=None): loc_id = location.get('id') if loc_id is None: msg = _("The location data has an invalid ID: %d") % loc_id raise exception.Invalid(msg) try: session = session or get_session() location_ref = session.query(models.ImageLocation).filter_by( id=loc_id).filter_by(image_id=image_id).one() deleted = location['status'] in ('deleted', 'pending_delete') updated_time = timeutils.utcnow() delete_time = updated_time if deleted else None location_ref.update({"value": location['url'], "meta_data": location['metadata'], "status": location['status'], "deleted": deleted, "updated_at": updated_time, "deleted_at": delete_time}) location_ref.save(session=session) except sa_orm.exc.NoResultFound: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=loc_id, img=image_id)) LOG.warn(msg) raise exception.NotFound(msg) def image_location_delete(context, image_id, 
location_id, status, delete_time=None, session=None): if status not in ('deleted', 'pending_delete'): msg = _("The status of deleted image location can only be set to " "'pending_delete' or 'deleted'") raise exception.Invalid(msg) try: session = session or get_session() location_ref = session.query(models.ImageLocation).filter_by( id=location_id).filter_by(image_id=image_id).one() delete_time = delete_time or timeutils.utcnow() location_ref.update({"deleted": True, "status": status, "updated_at": delete_time, "deleted_at": delete_time}) location_ref.save(session=session) except sa_orm.exc.NoResultFound: msg = (_("No location found with ID %(loc)s from image %(img)s") % dict(loc=location_id, img=image_id)) LOG.warn(msg) raise exception.NotFound(msg) def _image_locations_set(context, image_id, locations, session=None): # NOTE(zhiyan): 1. Remove records from DB for deleted locations session = session or get_session() query = session.query(models.ImageLocation).filter_by( image_id=image_id).filter_by(deleted=False) loc_ids = [loc['id'] for loc in locations if loc.get('id')] if loc_ids: query = query.filter(~models.ImageLocation.id.in_(loc_ids)) for loc_id in [loc_ref.id for loc_ref in query.all()]: image_location_delete(context, image_id, loc_id, 'deleted', session=session) # NOTE(zhiyan): 2. 
Adding or update locations for loc in locations: if loc.get('id') is None: image_location_add(context, image_id, loc, session=session) else: image_location_update(context, image_id, loc, session=session) def _image_locations_delete_all(context, image_id, delete_time=None, session=None): """Delete all image locations for given image""" session = session or get_session() location_refs = session.query(models.ImageLocation).filter_by( image_id=image_id).filter_by(deleted=False).all() for loc_id in [loc_ref.id for loc_ref in location_refs]: image_location_delete(context, image_id, loc_id, 'deleted', delete_time=delete_time, session=session) @utils.no_4byte_params def _set_properties_for_image(context, image_ref, properties, purge_props=False, session=None): """ Create or update a set of image_properties for a given image :param context: Request context :param image_ref: An Image object :param properties: A dict of properties to set :param session: A SQLAlchemy session to use (if present) """ orig_properties = {} for prop_ref in image_ref.properties: orig_properties[prop_ref.name] = prop_ref for name, value in six.iteritems(properties): prop_values = {'image_id': image_ref.id, 'name': name, 'value': value} if name in orig_properties: prop_ref = orig_properties[name] _image_property_update(context, prop_ref, prop_values, session=session) else: image_property_create(context, prop_values, session=session) if purge_props: for key in orig_properties.keys(): if key not in properties: prop_ref = orig_properties[key] image_property_delete(context, prop_ref.name, image_ref.id, session=session) def _image_child_entry_delete_all(child_model_cls, image_id, delete_time=None, session=None): """Deletes all the child entries for the given image id. Deletes all the child entries of the given child entry ORM model class using the parent image's id. The child entry ORM model class can be one of the following: model.ImageLocation, model.ImageProperty, model.ImageMember and model.ImageTag. 
:param child_model_cls: the ORM model class. :param image_id: id of the image whose child entries are to be deleted. :param delete_time: datetime of deletion to be set. If None, uses current datetime. :param session: A SQLAlchemy session to use (if present) :rtype: int :returns: The number of child entries got soft-deleted. """ session = session or get_session() query = session.query(child_model_cls).filter_by( image_id=image_id).filter_by(deleted=False) delete_time = delete_time or timeutils.utcnow() count = query.update({"deleted": True, "deleted_at": delete_time}) return count def image_property_create(context, values, session=None): """Create an ImageProperty object.""" prop_ref = models.ImageProperty() prop = _image_property_update(context, prop_ref, values, session=session) return prop.to_dict() def _image_property_update(context, prop_ref, values, session=None): """ Used internally by image_property_create and image_property_update. """ _drop_protected_attrs(models.ImageProperty, values) values["deleted"] = False prop_ref.update(values) prop_ref.save(session=session) return prop_ref def image_property_delete(context, prop_ref, image_ref, session=None): """ Used internally by image_property_create and image_property_update. 
""" session = session or get_session() prop = session.query(models.ImageProperty).filter_by(image_id=image_ref, name=prop_ref).one() prop.delete(session=session) return prop def _image_property_delete_all(context, image_id, delete_time=None, session=None): """Delete all image properties for given image""" props_updated_count = _image_child_entry_delete_all(models.ImageProperty, image_id, delete_time, session) return props_updated_count def image_member_create(context, values, session=None): """Create an ImageMember object.""" memb_ref = models.ImageMember() _image_member_update(context, memb_ref, values, session=session) return _image_member_format(memb_ref) def _image_member_format(member_ref): """Format a member ref for consumption outside of this module.""" return { 'id': member_ref['id'], 'image_id': member_ref['image_id'], 'member': member_ref['member'], 'can_share': member_ref['can_share'], 'status': member_ref['status'], 'created_at': member_ref['created_at'], 'updated_at': member_ref['updated_at'], 'deleted': member_ref['deleted'] } def image_member_update(context, memb_id, values): """Update an ImageMember object.""" session = get_session() memb_ref = _image_member_get(context, memb_id, session) _image_member_update(context, memb_ref, values, session) return _image_member_format(memb_ref) def _image_member_update(context, memb_ref, values, session=None): """Apply supplied dictionary of values to a Member object.""" _drop_protected_attrs(models.ImageMember, values) values["deleted"] = False values.setdefault('can_share', False) memb_ref.update(values) memb_ref.save(session=session) return memb_ref def image_member_delete(context, memb_id, session=None): """Delete an ImageMember object.""" session = session or get_session() member_ref = _image_member_get(context, memb_id, session) _image_member_delete(context, member_ref, session) def _image_member_delete(context, memb_ref, session): memb_ref.delete(session=session) def _image_member_delete_all(context, 
image_id, delete_time=None, session=None): """Delete all image members for given image""" members_updated_count = _image_child_entry_delete_all(models.ImageMember, image_id, delete_time, session) return members_updated_count def _image_member_get(context, memb_id, session): """Fetch an ImageMember entity by id.""" query = session.query(models.ImageMember) query = query.filter_by(id=memb_id) return query.one() def image_member_find(context, image_id=None, member=None, status=None, include_deleted=False): """Find all members that meet the given criteria. Note, currently include_deleted should be true only when create a new image membership, as there may be a deleted image membership between the same image and tenant, the membership will be reused in this case. It should be false in other cases. :param image_id: identifier of image entity :param member: tenant to which membership has been granted :include_deleted: A boolean indicating whether the result should include the deleted record of image member """ session = get_session() members = _image_member_find(context, session, image_id, member, status, include_deleted) return [_image_member_format(m) for m in members] def _image_member_find(context, session, image_id=None, member=None, status=None, include_deleted=False): query = session.query(models.ImageMember) if not include_deleted: query = query.filter_by(deleted=False) if not context.is_admin: query = query.join(models.Image) filters = [ models.Image.owner == context.owner, models.ImageMember.member == context.owner, ] query = query.filter(sa_sql.or_(*filters)) if image_id is not None: query = query.filter(models.ImageMember.image_id == image_id) if member is not None: query = query.filter(models.ImageMember.member == member) if status is not None: query = query.filter(models.ImageMember.status == status) return query.all() def image_member_count(context, image_id): """Return the number of image members for this image :param image_id: identifier of image entity 
""" session = get_session() if not image_id: msg = _("Image id is required.") raise exception.Invalid(msg) query = session.query(models.ImageMember) query = query.filter_by(deleted=False) query = query.filter(models.ImageMember.image_id == str(image_id)) return query.count() def image_tag_set_all(context, image_id, tags): # NOTE(kragniz): tag ordering should match exactly what was provided, so a # subsequent call to image_tag_get_all returns them in the correct order session = get_session() existing_tags = image_tag_get_all(context, image_id, session) tags_created = [] for tag in tags: if tag not in tags_created and tag not in existing_tags: tags_created.append(tag) image_tag_create(context, image_id, tag, session) for tag in existing_tags: if tag not in tags: image_tag_delete(context, image_id, tag, session) @utils.no_4byte_params def image_tag_create(context, image_id, value, session=None): """Create an image tag.""" session = session or get_session() tag_ref = models.ImageTag(image_id=image_id, value=value) tag_ref.save(session=session) return tag_ref['value'] def image_tag_delete(context, image_id, value, session=None): """Delete an image tag.""" _check_image_id(image_id) session = session or get_session() query = session.query(models.ImageTag).filter_by( image_id=image_id).filter_by( value=value).filter_by(deleted=False) try: tag_ref = query.one() except sa_orm.exc.NoResultFound: raise exception.NotFound() tag_ref.delete(session=session) def _image_tag_delete_all(context, image_id, delete_time=None, session=None): """Delete all image tags for given image""" tags_updated_count = _image_child_entry_delete_all(models.ImageTag, image_id, delete_time, session) return tags_updated_count def image_tag_get_all(context, image_id, session=None): """Get a list of tags for a specific image.""" _check_image_id(image_id) session = session or get_session() tags = session.query(models.ImageTag.value).filter_by( image_id=image_id).filter_by(deleted=False).all() return [tag[0] 
for tag in tags] class DeleteFromSelect(sa_sql.expression.UpdateBase): def __init__(self, table, select, column): self.table = table self.select = select self.column = column # NOTE(abhishekk): MySQL doesn't yet support subquery with # 'LIMIT & IN/ALL/ANY/SOME' We need work around this with nesting select. @compiles(DeleteFromSelect) def visit_delete_from_select(element, compiler, **kw): return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % ( compiler.process(element.table, asfrom=True), compiler.process(element.column), element.column.name, compiler.process(element.select)) def purge_deleted_rows(context, age_in_days, max_rows, session=None): """Purges soft deleted rows Deletes rows of table images, table tasks and all dependent tables according to given age for relevant models. """ try: age_in_days = int(age_in_days) except ValueError: LOG.exception(_LE('Invalid value for age, %(age)d'), {'age': age_in_days}) raise exception.InvalidParameterValue(value=age_in_days, param='age_in_days') try: max_rows = int(max_rows) except ValueError: LOG.exception(_LE('Invalid value for max_rows, %(max_rows)d'), {'max_rows': max_rows}) raise exception.InvalidParameterValue(value=max_rows, param='max_rows') session = session or get_session() metadata = MetaData(get_engine()) deleted_age = timeutils.utcnow() - datetime.timedelta(days=age_in_days) tables = [] for model_class in models.__dict__.values(): if not hasattr(model_class, '__tablename__'): continue if hasattr(model_class, 'deleted'): tables.append(model_class.__tablename__) # get rid of FX constraints for tbl in ('images', 'tasks'): try: tables.remove(tbl) except ValueError: LOG.warning(_LW('Expected table %(tbl)s was not found in DB.'), {'tbl': tbl}) else: tables.append(tbl) for tbl in tables: tab = Table(tbl, metadata, autoload=True) LOG.info( _LI('Purging deleted rows older than %(age_in_days)d day(s) ' 'from table %(tbl)s'), {'age_in_days': age_in_days, 'tbl': tbl}) column = tab.c.id deleted_at_column = 
tab.c.deleted_at query_delete = sql.select( [column], deleted_at_column < deleted_age).order_by( deleted_at_column).limit(max_rows) delete_statement = DeleteFromSelect(tab, query_delete, column) with session.begin(): result = session.execute(delete_statement) rows = result.rowcount LOG.info(_LI('Deleted %(rows)d row(s) from table %(tbl)s'), {'rows': rows, 'tbl': tbl}) def user_get_storage_usage(context, owner_id, image_id=None, session=None): _check_image_id(image_id) session = session or get_session() total_size = _image_get_disk_usage_by_owner( owner_id, session, image_id=image_id) return total_size def _task_info_format(task_info_ref): """Format a task info ref for consumption outside of this module""" if task_info_ref is None: return {} return { 'task_id': task_info_ref['task_id'], 'input': task_info_ref['input'], 'result': task_info_ref['result'], 'message': task_info_ref['message'], } def _task_info_create(context, task_id, values, session=None): """Create an TaskInfo object""" session = session or get_session() task_info_ref = models.TaskInfo() task_info_ref.task_id = task_id task_info_ref.update(values) task_info_ref.save(session=session) return _task_info_format(task_info_ref) def _task_info_update(context, task_id, values, session=None): """Update an TaskInfo object""" session = session or get_session() task_info_ref = _task_info_get(context, task_id, session=session) if task_info_ref: task_info_ref.update(values) task_info_ref.save(session=session) return _task_info_format(task_info_ref) def _task_info_get(context, task_id, session=None): """Fetch an TaskInfo entity by task_id""" session = session or get_session() query = session.query(models.TaskInfo) query = query.filter_by(task_id=task_id) try: task_info_ref = query.one() except sa_orm.exc.NoResultFound: LOG.debug("TaskInfo was not found for task with id %(task_id)s", {'task_id': task_id}) task_info_ref = None return task_info_ref def task_create(context, values, session=None): """Create a task 
object""" values = values.copy() session = session or get_session() with session.begin(): task_info_values = _pop_task_info_values(values) task_ref = models.Task() _task_update(context, task_ref, values, session=session) _task_info_create(context, task_ref.id, task_info_values, session=session) return task_get(context, task_ref.id, session) def _pop_task_info_values(values): task_info_values = {} for k, v in values.items(): if k in ['input', 'result', 'message']: values.pop(k) task_info_values[k] = v return task_info_values def task_update(context, task_id, values, session=None): """Update a task object""" session = session or get_session() with session.begin(): task_info_values = _pop_task_info_values(values) task_ref = _task_get(context, task_id, session) _drop_protected_attrs(models.Task, values) values['updated_at'] = timeutils.utcnow() _task_update(context, task_ref, values, session) if task_info_values: _task_info_update(context, task_id, task_info_values, session) return task_get(context, task_id, session) def task_get(context, task_id, session=None, force_show_deleted=False): """Fetch a task entity by id""" task_ref = _task_get(context, task_id, session=session, force_show_deleted=force_show_deleted) return _task_format(task_ref, task_ref.info) def task_delete(context, task_id, session=None): """Delete a task""" session = session or get_session() task_ref = _task_get(context, task_id, session=session) task_ref.delete(session=session) return _task_format(task_ref, task_ref.info) def task_get_all(context, filters=None, marker=None, limit=None, sort_key='created_at', sort_dir='desc', admin_as_user=False): """ Get all tasks that match zero or more filters. :param filters: dict of filter keys and values. 
:param marker: task id after which to start page :param limit: maximum number of tasks to return :param sort_key: task attribute by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param admin_as_user: For backwards compatibility. If true, then return to an admin the equivalent set of tasks which it would see if it were a regular user :returns: tasks set """ filters = filters or {} session = get_session() query = session.query(models.Task) if not (context.is_admin or admin_as_user) and context.owner is not None: query = query.filter(models.Task.owner == context.owner) showing_deleted = False if 'deleted' in filters: deleted_filter = filters.pop('deleted') query = query.filter_by(deleted=deleted_filter) showing_deleted = deleted_filter for (k, v) in filters.items(): if v is not None: key = k if hasattr(models.Task, key): query = query.filter(getattr(models.Task, key) == v) marker_task = None if marker is not None: marker_task = _task_get(context, marker, force_show_deleted=showing_deleted) sort_keys = ['created_at', 'id'] if sort_key not in sort_keys: sort_keys.insert(0, sort_key) query = _paginate_query(query, models.Task, limit, sort_keys, marker=marker_task, sort_dir=sort_dir) task_refs = query.all() tasks = [] for task_ref in task_refs: tasks.append(_task_format(task_ref, task_info_ref=None)) return tasks def _is_task_visible(context, task): """Return True if the task is visible in this context.""" # Is admin == task visible if context.is_admin: return True # No owner == task visible if task['owner'] is None: return True # Perform tests based on whether we have an owner if context.owner is not None: if context.owner == task['owner']: return True return False def _task_get(context, task_id, session=None, force_show_deleted=False): """Fetch a task entity by id""" session = session or get_session() query = session.query(models.Task).options( sa_orm.joinedload(models.Task.info) ).filter_by(id=task_id) if not 
force_show_deleted and not context.can_see_deleted: query = query.filter_by(deleted=False) try: task_ref = query.one() except sa_orm.exc.NoResultFound: LOG.debug("No task found with ID %s", task_id) raise exception.TaskNotFound(task_id=task_id) # Make sure the task is visible if not _is_task_visible(context, task_ref): msg = "Forbidding request, task %s is not visible" % task_id LOG.debug(msg) raise exception.Forbidden(msg) return task_ref def _task_update(context, task_ref, values, session=None): """Apply supplied dictionary of values to a task object.""" if 'deleted' not in values: values["deleted"] = False task_ref.update(values) task_ref.save(session=session) return task_ref def _task_format(task_ref, task_info_ref=None): """Format a task ref for consumption outside of this module""" task_dict = { 'id': task_ref['id'], 'type': task_ref['type'], 'status': task_ref['status'], 'owner': task_ref['owner'], 'expires_at': task_ref['expires_at'], 'created_at': task_ref['created_at'], 'updated_at': task_ref['updated_at'], 'deleted_at': task_ref['deleted_at'], 'deleted': task_ref['deleted'] } if task_info_ref: task_info_dict = { 'input': task_info_ref['input'], 'result': task_info_ref['result'], 'message': task_info_ref['message'], } task_dict.update(task_info_dict) return task_dict def metadef_namespace_get_all(context, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, session=None): """List all available namespaces.""" session = session or get_session() namespaces = metadef_namespace_api.get_all( context, session, marker, limit, sort_key, sort_dir, filters) return namespaces def metadef_namespace_get(context, namespace_name, session=None): """Get a namespace or raise if it does not exist or is not visible.""" session = session or get_session() return metadef_namespace_api.get( context, namespace_name, session) def metadef_namespace_create(context, values, session=None): """Create a namespace or raise if it already exists.""" session = session or 
get_session() return metadef_namespace_api.create(context, values, session) def metadef_namespace_update(context, namespace_id, namespace_dict, session=None): """Update a namespace or raise if it does not exist or not visible""" session = session or get_session() return metadef_namespace_api.update( context, namespace_id, namespace_dict, session) def metadef_namespace_delete(context, namespace_name, session=None): """Delete the namespace and all foreign references""" session = session or get_session() return metadef_namespace_api.delete_cascade( context, namespace_name, session) def metadef_object_get_all(context, namespace_name, session=None): """Get a metadata-schema object or raise if it does not exist.""" session = session or get_session() return metadef_object_api.get_all( context, namespace_name, session) def metadef_object_get(context, namespace_name, object_name, session=None): """Get a metadata-schema object or raise if it does not exist.""" session = session or get_session() return metadef_object_api.get( context, namespace_name, object_name, session) def metadef_object_create(context, namespace_name, object_dict, session=None): """Create a metadata-schema object or raise if it already exists.""" session = session or get_session() return metadef_object_api.create( context, namespace_name, object_dict, session) def metadef_object_update(context, namespace_name, object_id, object_dict, session=None): """Update an object or raise if it does not exist or not visible.""" session = session or get_session() return metadef_object_api.update( context, namespace_name, object_id, object_dict, session) def metadef_object_delete(context, namespace_name, object_name, session=None): """Delete an object or raise if namespace or object doesn't exist.""" session = session or get_session() return metadef_object_api.delete( context, namespace_name, object_name, session) def metadef_object_delete_namespace_content( context, namespace_name, session=None): """Delete an object 
or raise if namespace or object doesn't exist.""" session = session or get_session() return metadef_object_api.delete_by_namespace_name( context, namespace_name, session) def metadef_object_count(context, namespace_name, session=None): """Get count of properties for a namespace, raise if ns doesn't exist.""" session = session or get_session() return metadef_object_api.count(context, namespace_name, session) def metadef_property_get_all(context, namespace_name, session=None): """Get a metadef property or raise if it does not exist.""" session = session or get_session() return metadef_property_api.get_all(context, namespace_name, session) def metadef_property_get(context, namespace_name, property_name, session=None): """Get a metadef property or raise if it does not exist.""" session = session or get_session() return metadef_property_api.get( context, namespace_name, property_name, session) def metadef_property_create(context, namespace_name, property_dict, session=None): """Create a metadef property or raise if it already exists.""" session = session or get_session() return metadef_property_api.create( context, namespace_name, property_dict, session) def metadef_property_update(context, namespace_name, property_id, property_dict, session=None): """Update an object or raise if it does not exist or not visible.""" session = session or get_session() return metadef_property_api.update( context, namespace_name, property_id, property_dict, session) def metadef_property_delete(context, namespace_name, property_name, session=None): """Delete a property or raise if it or namespace doesn't exist.""" session = session or get_session() return metadef_property_api.delete( context, namespace_name, property_name, session) def metadef_property_delete_namespace_content( context, namespace_name, session=None): """Delete a property or raise if it or namespace doesn't exist.""" session = session or get_session() return metadef_property_api.delete_by_namespace_name( context, 
namespace_name, session) def metadef_property_count(context, namespace_name, session=None): """Get count of properties for a namespace, raise if ns doesn't exist.""" session = session or get_session() return metadef_property_api.count(context, namespace_name, session) def metadef_resource_type_create(context, values, session=None): """Create a resource_type""" session = session or get_session() return metadef_resource_type_api.create( context, values, session) def metadef_resource_type_get(context, resource_type_name, session=None): """Get a resource_type""" session = session or get_session() return metadef_resource_type_api.get( context, resource_type_name, session) def metadef_resource_type_get_all(context, session=None): """list all resource_types""" session = session or get_session() return metadef_resource_type_api.get_all(context, session) def metadef_resource_type_delete(context, resource_type_name, session=None): """Get a resource_type""" session = session or get_session() return metadef_resource_type_api.delete( context, resource_type_name, session) def metadef_resource_type_association_get( context, namespace_name, resource_type_name, session=None): session = session or get_session() return metadef_association_api.get( context, namespace_name, resource_type_name, session) def metadef_resource_type_association_create( context, namespace_name, values, session=None): session = session or get_session() return metadef_association_api.create( context, namespace_name, values, session) def metadef_resource_type_association_delete( context, namespace_name, resource_type_name, session=None): session = session or get_session() return metadef_association_api.delete( context, namespace_name, resource_type_name, session) def metadef_resource_type_association_get_all_by_namespace( context, namespace_name, session=None): session = session or get_session() return metadef_association_api.get_all_by_namespace( context, namespace_name, session) def metadef_tag_get_all( 
context, namespace_name, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, session=None): """Get metadata-schema tags or raise if none exist.""" session = session or get_session() return metadef_tag_api.get_all( context, namespace_name, session, filters, marker, limit, sort_key, sort_dir) def metadef_tag_get(context, namespace_name, name, session=None): """Get a metadata-schema tag or raise if it does not exist.""" session = session or get_session() return metadef_tag_api.get( context, namespace_name, name, session) def metadef_tag_create(context, namespace_name, tag_dict, session=None): """Create a metadata-schema tag or raise if it already exists.""" session = session or get_session() return metadef_tag_api.create( context, namespace_name, tag_dict, session) def metadef_tag_create_tags(context, namespace_name, tag_list, session=None): """Create a metadata-schema tag or raise if it already exists.""" session = get_session() return metadef_tag_api.create_tags( context, namespace_name, tag_list, session) def metadef_tag_update(context, namespace_name, id, tag_dict, session=None): """Update an tag or raise if it does not exist or not visible.""" session = session or get_session() return metadef_tag_api.update( context, namespace_name, id, tag_dict, session) def metadef_tag_delete(context, namespace_name, name, session=None): """Delete an tag or raise if namespace or tag doesn't exist.""" session = session or get_session() return metadef_tag_api.delete( context, namespace_name, name, session) def metadef_tag_delete_namespace_content( context, namespace_name, session=None): """Delete an tag or raise if namespace or tag doesn't exist.""" session = session or get_session() return metadef_tag_api.delete_by_namespace_name( context, namespace_name, session) def metadef_tag_count(context, namespace_name, session=None): """Get count of tags for a namespace, raise if ns doesn't exist.""" session = session or get_session() return 
metadef_tag_api.count(context, namespace_name, session) def artifact_create(context, values, type_name, type_version=None, session=None): session = session or get_session() artifact = glare.create(context, values, session, type_name, type_version) return artifact def artifact_delete(context, artifact_id, type_name, type_version=None, session=None): session = session or get_session() artifact = glare.delete(context, artifact_id, session, type_name, type_version) return artifact def artifact_update(context, values, artifact_id, type_name, type_version=None, session=None): session = session or get_session() artifact = glare.update(context, values, artifact_id, session, type_name, type_version) return artifact def artifact_get(context, artifact_id, type_name=None, type_version=None, show_level=ga.Showlevel.BASIC, session=None): session = session or get_session() return glare.get(context, artifact_id, session, type_name, type_version, show_level) def artifact_publish(context, artifact_id, type_name, type_version=None, session=None): session = session or get_session() return glare.publish(context, artifact_id, session, type_name, type_version) def artifact_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, show_level=ga.Showlevel.NONE, session=None): session = session or get_session() return glare.get_all(context, session, marker, limit, sort_keys, sort_dirs, filters, show_level) glance-12.0.0/glance/db/metadata.py0000664000567000056710000000411212701407047020134 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2013 OpenStack Foundation # Copyright 2013 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Metadata setup commands."""

import threading

from oslo_config import cfg
from oslo_db import options as db_options
from stevedore import driver

from glance.db.sqlalchemy import api as db_api

# Lazily-loaded metadata backend driver (module-level singleton).
_IMPL = None
# Guards the one-time initialization of _IMPL across threads.
_LOCK = threading.Lock()

db_options.set_defaults(cfg.CONF)


def get_backend():
    """Return the metadata backend driver, loading it on first use.

    Uses double-checked locking (check, acquire _LOCK, re-check) so that
    concurrent callers construct the stevedore DriverManager exactly once.
    The concrete driver is selected by the ``[database] backend`` config
    option via the ``glance.database.metadata_backend`` entry-point
    namespace.

    :returns: the loaded backend driver object
    """
    global _IMPL
    if _IMPL is None:
        with _LOCK:
            if _IMPL is None:
                _IMPL = driver.DriverManager(
                    "glance.database.metadata_backend",
                    cfg.CONF.database.backend).driver
    return _IMPL


def load_metadefs():
    """Read metadefinition files and insert data into the database.

    Delegates to the backend's ``db_load_metadefs`` with a default engine
    and conservative flags (no merge, no preference for new values, no
    overwrite of existing definitions).
    """
    return get_backend().db_load_metadefs(engine=db_api.get_engine(),
                                          metadata_path=None,
                                          merge=False,
                                          prefer_new=False,
                                          overwrite=False)


def unload_metadefs():
    """Unload (remove) metadefinitions from the database."""
    return get_backend().db_unload_metadefs(engine=db_api.get_engine())


def export_metadefs():
    """Export metadefinitions from the database to files.

    ``metadata_path=None`` lets the backend choose its default output path.
    """
    return get_backend().db_export_metadefs(engine=db_api.get_engine(),
                                            metadata_path=None)
glance-12.0.0/glance/db/migration.py0000664000567000056710000000361212701407047020351 0ustar  jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database setup and migration commands."""

import os
import threading

from oslo_config import cfg
from oslo_db import options as db_options
from stevedore import driver

from glance.db.sqlalchemy import api as db_api

# Lazily-loaded migration backend driver (module-level singleton).
_IMPL = None
# Guards the one-time initialization of _IMPL across threads.
_LOCK = threading.Lock()

db_options.set_defaults(cfg.CONF)


def get_backend():
    """Return the migration backend driver, loading it on first use.

    Uses double-checked locking (check, acquire _LOCK, re-check) so that
    concurrent callers construct the stevedore DriverManager exactly once.
    The concrete driver is selected by the ``[database] backend`` config
    option via the ``glance.database.migration_backend`` entry-point
    namespace.

    :returns: the loaded backend driver object
    """
    global _IMPL
    if _IMPL is None:
        with _LOCK:
            if _IMPL is None:
                _IMPL = driver.DriverManager(
                    "glance.database.migration_backend",
                    cfg.CONF.database.backend).driver
    return _IMPL


# Version number the migration repository starts counting from.
INIT_VERSION = 0

# Absolute path to the sqlalchemy-migrate repository shipped alongside
# this module (glance/db/sqlalchemy/migrate_repo).
MIGRATE_REPO_PATH = os.path.join(
    os.path.abspath(os.path.dirname(__file__)),
    'sqlalchemy',
    'migrate_repo',
)


def db_sync(version=None, init_version=0, engine=None):
    """Migrate the database to `version` or the most recent version.

    :param version: target schema version; None means "latest"
    :param init_version: version to start counting migrations from
    :param engine: SQLAlchemy engine to use; defaults to the engine from
        glance.db.sqlalchemy.api when None
    :returns: whatever the backend's ``db_sync`` returns
    """
    if engine is None:
        engine = db_api.get_engine()
    return get_backend().db_sync(engine=engine,
                                 abs_path=MIGRATE_REPO_PATH,
                                 version=version,
                                 init_version=init_version)
glance-12.0.0/glance/api/0000775000567000056710000000000012701407204016163 5ustar  jenkinsjenkins00000000000000glance-12.0.0/glance/api/glare/0000775000567000056710000000000012701407204017255 5ustar  jenkinsjenkins00000000000000glance-12.0.0/glance/api/glare/versions.py0000664000567000056710000000471512701407047021503 0ustar  jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec

from glance.common import wsgi
from glance import i18n

_ = i18n._

versions_opts = [
    cfg.StrOpt('public_endpoint',
               help=_('Public url to use for versions endpoint. The default '
                      'is None, which will use the request\'s host_url '
                      'attribute to populate the URL base. If Glance is '
                      'operating behind a proxy, you will want to change '
                      'this to represent the proxy\'s URL.')),
]

CONF = cfg.CONF
CONF.register_opts(versions_opts)


class Controller(object):

    """A wsgi controller that reports which API versions are supported."""

    def index(self, req, explicit=False):
        """Respond to a request for all OpenStack API versions.

        :param req: the incoming request
        :param explicit: True when a version was requested explicitly; the
            response status is then 200 OK instead of 300 Multiple Choices
        :returns: a webob.Response whose JSON body lists the known versions
        """
        def build_version_object(version, path, status):
            # Prefer the configured public_endpoint (for deployments behind
            # a proxy); otherwise derive the base URL from the request.
            url = CONF.public_endpoint or req.host_url
            return {
                'id': 'v%s' % version,
                'status': status,
                'links': [
                    {
                        'rel': 'self',
                        'href': '%s/%s/' % (url, path),
                    },
                ],
            }

        version_objs = [build_version_object(0.1, 'v0.1', 'EXPERIMENTAL')]

        # NOTE: legacy "cond and A or B" conditional; safe here because both
        # status constants are truthy integers (200 and 300).
        status = explicit and http_client.OK or http_client.MULTIPLE_CHOICES
        response = webob.Response(request=req,
                                  status=status,
                                  content_type='application/json')
        response.body = jsonutils.dump_as_bytes(dict(versions=version_objs))
        return response

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # WSGI entry point: any request to this app gets the version index.
        return self.index(req)


def create_resource(conf):
    # 'conf' is accepted for interface parity with other resource factories
    # but is not used here.
    return wsgi.Resource(Controller())
glance-12.0.0/glance/api/glare/__init__.py0000664000567000056710000000000012701407047021361 0ustar
jenkinsjenkins00000000000000glance-12.0.0/glance/api/glare/v0_1/0000775000567000056710000000000012701407204020022 5ustar  jenkinsjenkins00000000000000glance-12.0.0/glance/api/glare/v0_1/__init__.py0000664000567000056710000000000012701407047022126 0ustar  jenkinsjenkins00000000000000glance-12.0.0/glance/api/glare/v0_1/router.py0000664000567000056710000001005312701407047021720 0ustar  jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from glance.api.glare.v0_1 import glare
from glance.common import wsgi

# Matches a version-4 UUID (third group starts with '4', fourth group
# starts with one of 8/9/a/b), used to constrain the {id} route segment.
UUID_REGEX = (
    R'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}')


class API(wsgi.Router):
    """WSGI router wiring up the Glare v0.1 artifact routes."""

    def _get_artifacts_resource(self):
        # Lazily create and cache the single artifacts wsgi resource.
        if not self.artifacts_resource:
            self.artifacts_resource = glare.create_resource()
        return self.artifacts_resource

    def __init__(self, mapper):
        self.artifacts_resource = None
        artifacts_resource = self._get_artifacts_resource()
        reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())

        # Routing predicates: dispatch on the request Content-Type so the
        # same path can serve JSON property updates and raw blob uploads.
        def _check_json_content_type(environ, result):
            return "application/json" in environ["CONTENT_TYPE"]

        def _check_octet_stream_content_type(environ, result):
            return "application/octet-stream" in environ["CONTENT_TYPE"]

        def connect_routes(m, read_only):
            # Wire the per-artifact routes onto submapper 'm'. When
            # read_only is True only GET-style routes are connected (used
            # for the non-versioned URL form below).
            with m.submapper(resource_name="artifact_operations",
                             path_prefix="/{id}",
                             requirements={'id': UUID_REGEX}) as art:
                art.show()
                if not read_only:
                    art.delete()
                    art.action('update', method='PATCH')
                    art.link('publish', method='POST')

            def connect_attr_action(attr):
                # Attach upload/update/download routes for an artifact
                # attribute (and, via attr_items, for nested paths in it).
                if not read_only:
                    attr.action("upload", conditions={
                        'method': ["POST", "PUT"],
                        'function': _check_octet_stream_content_type})
                    attr.action("update_property", conditions={
                        'method': ["POST", "PUT"],
                        'function': _check_json_content_type})
                attr.link("download", method="GET")

            attr_map = art.submapper(resource_name="attr_operations",
                                     path_prefix="/{attr}", path_left=None)
            attr_items = art.submapper(
                resource_name="attr_item_ops",
                path_prefix="/{attr}/{path_left:.*}")
            connect_attr_action(attr_map)
            connect_attr_action(attr_items)

            # Listing: active artifacts at the collection root, drafts
            # under /drafts; draft creation only on writable mappers.
            m.connect("", action='list', conditions={'method': 'GET'},
                      state='active')
            m.connect("/drafts", action='list', conditions={'method': 'GET'},
                      state='creating')
            if not read_only:
                m.connect("/drafts", action='create',
                          conditions={'method': 'POST'})

        mapper.connect('/artifacts',
                       controller=artifacts_resource,
                       action='list_artifact_types',
                       conditions={'method': ['GET']})
        # Two URL forms: with an explicit type version (full read/write
        # routes) and without one (read-only routes).
        versioned = mapper.submapper(path_prefix='/artifacts/{type_name}/'
                                                 'v{type_version}',
                                     controller=artifacts_resource)
        non_versioned = mapper.submapper(path_prefix='/artifacts/{type_name}',
                                         type_version=None,
                                         controller=artifacts_resource)
        connect_routes(versioned, False)
        connect_routes(non_versioned, True)
        # Any other method on /artifacts is rejected with an Allow: GET.
        mapper.connect('/artifacts',
                       controller=reject_method_resource,
                       action='reject',
                       allowed_methods='GET')
        super(API, self).__init__(mapper)
glance-12.0.0/glance/api/glare/v0_1/glare.py0000664000567000056710000011553712701407047021503 0ustar  jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import os import sys import glance_store import jsonschema from oslo_config import cfg from oslo_serialization import jsonutils as json from oslo_utils import encodeutils from oslo_utils import excutils import semantic_version import six import six.moves.urllib.parse as urlparse import webob.exc from glance.common import exception from glance.common.glare import loader from glance.common.glare import serialization from glance.common import jsonpatchvalidator from glance.common import utils from glance.common import wsgi import glance.db from glance.glare import gateway from glance.glare import Showlevel from glance.i18n import _, _LE from oslo_log import log as logging LOG = logging.getLogger(__name__) possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") class ArtifactsController(object): def __init__(self, db_api=None, store_api=None, plugins=None): self.db_api = db_api or glance.db.get_api() self.store_api = store_api or glance_store self.plugins = plugins or loader.ArtifactsPluginLoader( 'glance.artifacts.types') self.gateway = gateway.Gateway(self.db_api, self.store_api, self.plugins) @staticmethod def _do_update_op(artifact, change): """Call corresponding method of the updater proxy. Here 'change' is a typical jsonpatch request dict: * 'path' - a json-pointer string; * 'op' - one of the allowed operation types; * 'value' - value to set (omitted when op = remove) """ update_op = getattr(artifact, change['op']) update_op(change['path'], change.get('value')) return artifact @staticmethod def _get_artifact_with_dependencies(repo, art_id, type_name=None, type_version=None): """Retrieves an artifact with dependencies from db by its id. 
Show level is direct (only direct dependencies are shown). """ return repo.get(art_id, show_level=Showlevel.DIRECT, type_name=type_name, type_version=type_version) def show(self, req, type_name, type_version, show_level=Showlevel.TRANSITIVE, **kwargs): """Retrieves one artifact by id with its dependencies""" artifact_repo = self.gateway.get_artifact_repo(req.context) try: art_id = kwargs.get('id') artifact = artifact_repo.get(art_id, type_name=type_name, type_version=type_version, show_level=show_level) return artifact except exception.ArtifactNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) def list(self, req, type_name, type_version, state, **kwargs): """Retrieves a list of artifacts that match some params""" artifact_repo = self.gateway.get_artifact_repo(req.context) filters = kwargs.pop('filters', {}) filters.update(type_name={'value': type_name}, state={'value': state}) if type_version is not None: filters['type_version'] = {'value': type_version} if 'version' in filters: for filter in filters['version']: if filter['value'] == 'latest': if 'name' not in filters: raise webob.exc.HTTPBadRequest( 'Filtering by latest version without specifying' ' a name is not supported.') filter['value'] = self._get_latest_version( req, filters['name'][0]['value'], type_name, type_version) else: try: semantic_version.Version(filter['value'], partial=True) except ValueError: msg = (_('The format of the version %s is not valid. 
    @utils.mutating
    def create(self, req, artifact_type, artifact_data, **kwargs):
        """Create a new artifact of the given type and return it.

        The artifact is built from 'artifact_data' via the type's factory,
        persisted, then re-read from the repo so the response reflects the
        stored state (with direct dependencies).
        Domain exceptions are translated to the matching HTTP errors.
        """
        try:
            artifact_factory = self.gateway.get_artifact_type_factory(
                req.context, artifact_type)
            new_artifact = artifact_factory.new_artifact(**artifact_data)
            artifact_repo = self.gateway.get_artifact_repo(req.context)
            artifact_repo.add(new_artifact)
            # retrieve artifact from db
            return self._get_artifact_with_dependencies(artifact_repo,
                                                        new_artifact.id)
        except (TypeError,
                exception.ArtifactNotFound,
                exception.Invalid,
                exception.DuplicateLocation) as e:
            # TypeError covers bad keyword args passed to new_artifact().
            raise webob.exc.HTTPBadRequest(explanation=e)
        except exception.Forbidden as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.LimitExceeded as e:
            raise webob.exc.HTTPRequestEntityTooLarge(
                explanation=e.msg, request=req, content_type='text/plain')
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except exception.NotAuthenticated as e:
            raise webob.exc.HTTPUnauthorized(explanation=e.msg)
by request url.""" artifact_repo = self.gateway.get_artifact_repo(req.context) try: artifact = self._get_artifact_with_dependencies(artifact_repo, id, type_name, type_version) self._ensure_write_access(artifact, req.context) if artifact.metadata.attributes.blobs.get(path) is not None: msg = _('Invalid Content-Type for work with %s') % path raise webob.exc.HTTPBadRequest(explanation=msg) # use updater mixin to perform updates: generate update path if req.method == "PUT": # replaces existing value or creates a new one if getattr(artifact, kwargs["attr"]): artifact.replace(path=path, value=data) else: artifact.add(path=path, value=data) else: # append to an existing value or create a new one artifact.add(path=path, value=data) artifact_repo.save(artifact) return self._get_artifact_with_dependencies(artifact_repo, id) except (exception.InvalidArtifactPropertyValue, exception.ArtifactInvalidProperty, exception.InvalidJsonPatchPath, exception.ArtifactCircularDependency) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) @utils.mutating def update(self, req, id, type_name, type_version, changes, **kwargs): """Performs an update via json patch request""" artifact_repo = self.gateway.get_artifact_repo(req.context) try: artifact = self._get_artifact_with_dependencies(artifact_repo, id, type_name, type_version) self._ensure_write_access(artifact, req.context) updated = artifact for change in changes: if artifact.metadata.attributes.blobs.get(change['path']): msg = _('Invalid request PATCH for work with blob') raise webob.exc.HTTPBadRequest(explanation=msg) else: updated = self._do_update_op(updated, change) artifact_repo.save(updated) return self._get_artifact_with_dependencies(artifact_repo, id) except (exception.InvalidJsonPatchPath, exception.Invalid) as e: raise 
    @utils.mutating
    def delete(self, req, id, type_name, type_version, **kwargs):
        """Delete an artifact after verifying the caller may write to it.

        Store-layer and domain exceptions are translated into the matching
        HTTP errors; returns nothing on success.
        """
        artifact_repo = self.gateway.get_artifact_repo(req.context)
        try:
            artifact = self._get_artifact_with_dependencies(
                artifact_repo, id, type_name=type_name,
                type_version=type_version)
            self._ensure_write_access(artifact, req.context)
            artifact_repo.remove(artifact)
        except exception.Invalid as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except (glance_store.Forbidden, exception.Forbidden) as e:
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except (glance_store.NotFound, exception.NotFound) as e:
            msg = (_("Failed to find artifact %(artifact_id)s to delete") %
                   {'artifact_id': id})
            raise webob.exc.HTTPNotFound(explanation=msg)
        except glance_store.exceptions.InUseByStore as e:
            msg = (_("Artifact %s could not be deleted "
                     "because it is in use: %s") % (id, e.msg))  # noqa
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.NotAuthenticated as e:
            raise webob.exc.HTTPUnauthorized(explanation=e.msg)
req.context) return artifact_repo.publish(artifact, context=req.context) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) def _upload_list_property(self, method, blob_list, index, data, size): if method == 'PUT' and not index and len(blob_list) > 0: # PUT replaces everything, so PUT to non-empty collection is # forbidden raise webob.exc.HTTPMethodNotAllowed( explanation=_("Unable to PUT to non-empty collection")) if index is not None and index > len(blob_list): raise webob.exc.HTTPBadRequest( explanation=_("Index is out of range")) if index is None: # both POST and PUT create a new blob list blob_list.append((data, size)) elif method == 'POST': blob_list.insert(index, (data, size)) else: blob_list[index] = (data, size) @utils.mutating def upload(self, req, id, type_name, type_version, attr, size, data, index, **kwargs): artifact_repo = self.gateway.get_artifact_repo(req.context) artifact = None try: artifact = self._get_artifact_with_dependencies(artifact_repo, id, type_name, type_version) self._ensure_write_access(artifact, req.context) blob_prop = artifact.metadata.attributes.blobs.get(attr) if blob_prop is None: raise webob.exc.HTTPBadRequest( explanation=_("Not a blob property '%s'") % attr) if isinstance(blob_prop, list): blob_list = getattr(artifact, attr) self._upload_list_property(req.method, blob_list, index, data, size) else: if index is not None: raise webob.exc.HTTPBadRequest( explanation=_("Not a list property '%s'") % attr) setattr(artifact, attr, (data, size)) artifact_repo.save(artifact) return artifact except ValueError as e: exc_message = encodeutils.exception_to_unicode(e) LOG.debug("Cannot save data for artifact %(id)s: %(e)s", {'id': id, 'e': exc_message}) 
self._restore(artifact_repo, artifact) raise webob.exc.HTTPBadRequest( explanation=exc_message) except glance_store.StoreAddDisabled: msg = _("Error in store configuration. Adding artifacts to store " "is disabled.") LOG.exception(msg) self._restore(artifact_repo, artifact) raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except (glance_store.Duplicate, exception.InvalidImageStatusTransition) as e: LOG.exception(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPConflict(explanation=e.msg, request=req) except exception.Forbidden as e: msg = ("Not allowed to upload data for artifact %s" % id) LOG.debug(msg) raise webob.exc.HTTPForbidden(explanation=msg, request=req) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except glance_store.StorageFull as e: msg = _("Artifact storage media " "is full: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(artifact_repo, artifact) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.StorageQuotaFull as e: msg = _("Artifact exceeds the storage " "quota: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(artifact_repo, artifact) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.ImageSizeLimitExceeded as e: msg = _("The incoming artifact blob is " "too large: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(artifact_repo, artifact) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except glance_store.StorageWriteDenied as e: msg = _("Insufficient permissions on artifact " "storage media: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(artifact_repo, artifact) raise webob.exc.HTTPServiceUnavailable(explanation=msg, request=req) except webob.exc.HTTPGone as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload artifact blob data due to" " HTTP error")) 
except webob.exc.HTTPError as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload artifact blob data due to HTTP" " error")) self._restore(artifact_repo, artifact) except Exception as e: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to upload artifact blob data due to " "internal error")) self._restore(artifact_repo, artifact) def download(self, req, id, type_name, type_version, attr, index, **kwargs): artifact_repo = self.gateway.get_artifact_repo(req.context) try: artifact = artifact_repo.get(id, type_name, type_version) if attr in artifact.metadata.attributes.blobs: if isinstance(artifact.metadata.attributes.blobs[attr], list): if index is None: raise webob.exc.HTTPBadRequest( explanation=_("Index is required")) blob_list = getattr(artifact, attr) try: return blob_list[index] except IndexError as e: raise webob.exc.HTTPBadRequest(explanation=e.message) else: if index is not None: raise webob.exc.HTTPBadRequest(_("Not a list " "property")) return getattr(artifact, attr) else: message = _("Not a downloadable entity") raise webob.exc.HTTPBadRequest(explanation=message) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except (glance_store.NotFound, exception.NotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) def _restore(self, artifact_repo, artifact): """Restore the artifact to queued status. 
:param artifact_repo: The instance of ArtifactRepo :param artifact: The artifact will be restored """ try: if artifact_repo and artifact: artifact.state = 'creating' artifact_repo.save(artifact) except Exception as e: msg = (_LE("Unable to restore artifact %(artifact_id)s: %(e)s") % {'artifact_id': artifact.id, 'e': encodeutils.exception_to_unicode(e)}) LOG.exception(msg) def list_artifact_types(self, req): plugins = self.plugins.plugin_map response = [] base_link = "%s/v0.1/artifacts" % (CONF.public_endpoint or req.host_url) for type_name, plugin in six.iteritems(plugins.get("by_typename")): metadata = dict( type_name=type_name, displayed_name=plugin[0].metadata.type_display_name, versions=[] ) for version in plugin: endpoint = version.metadata.endpoint type_version = "v" + version.metadata.type_version version_metadata = dict( id=type_version, link="%s/%s/%s" % (base_link, endpoint, type_version) ) type_description = version.metadata.type_description if type_description is not None: version_metadata['description'] = type_description metadata['versions'].append(version_metadata) response.append(metadata) return {"artifact_types": response} @staticmethod def _ensure_write_access(artifact, context): if context.is_admin: return if context.owner is None or context.owner != artifact.owner: raise exception.ArtifactForbidden(id=artifact.id) class RequestDeserializer(wsgi.JSONRequestDeserializer, jsonpatchvalidator.JsonPatchValidatorMixin): _available_sort_keys = ('name', 'status', 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at', 'version') _default_sort_dir = 'desc' _max_limit_number = 1000 def __init__(self, schema=None, plugins=None): super(RequestDeserializer, self).__init__( methods_allowed=["replace", "remove", "add"]) self.plugins = plugins or loader.ArtifactsPluginLoader( 'glance.artifacts.types') def _validate_show_level(self, show_level): try: return Showlevel.from_str(show_level.strip().lower()) except 
exception.ArtifactUnsupportedShowLevel as e: raise webob.exc.HTTPBadRequest(explanation=e.message) def show(self, req): res = self._process_type_from_request(req, True) params = req.params.copy() show_level = params.pop('show_level', None) if show_level is not None: res['show_level'] = self._validate_show_level(show_level) return res def _get_request_body(self, req): output = super(RequestDeserializer, self).default(req) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def validate_body(self, request): try: body = self._get_request_body(request) return super(RequestDeserializer, self).validate_body(body) except exception.JsonPatchException as e: raise webob.exc.HTTPBadRequest(explanation=e) def default(self, request): return self._process_type_from_request(request) def _check_type_version(self, type_version): try: semantic_version.Version(type_version, partial=True) except ValueError as e: raise webob.exc.HTTPBadRequest(explanation=e) def _process_type_from_request(self, req, allow_implicit_version=False): try: type_name = req.urlvars.get('type_name') type_version = req.urlvars.get('type_version') if type_version is not None: self._check_type_version(type_version) # Even if the type_version is not specified and # 'allow_implicit_version' is False, this call is still needed to # ensure that at least one version of this type exists. 
artifact_type = self.plugins.get_class_by_endpoint(type_name, type_version) res = { 'type_name': artifact_type.metadata.type_name, 'type_version': artifact_type.metadata.type_version if type_version is not None else None } if allow_implicit_version: res['artifact_type'] = artifact_type return res except exception.ArtifactPluginNotFound as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) def _validate_headers(self, req, content_type='application/json'): header = req.headers.get('Content-Type') if header != content_type: msg = _('Invalid headers "Content-Type": %s') % header raise webob.exc.HTTPBadRequest(explanation=msg) def create(self, req): self._validate_headers(req) res = self._process_type_from_request(req, True) res["artifact_data"] = self._get_request_body(req) return res def update(self, req): self._validate_headers(req) res = self._process_type_from_request(req) res["changes"] = self.validate_body(req) return res def update_property(self, req): self._validate_headers(req) """Data is expected in form {'data': ...}""" res = self._process_type_from_request(req) data_schema = { "type": "object", "properties": {"data": {}}, "required": ["data"], "$schema": "http://json-schema.org/draft-04/schema#"} try: json_body = json.loads(req.body) jsonschema.validate(json_body, data_schema) # TODO(ivasilevskaya): # by now the deepest nesting level == 1 (ex. 
some_list/3), # has to be fixed for dict properties attr = req.urlvars["attr"] path_left = req.urlvars["path_left"] path = (attr if not path_left else "%(attr)s/%(path_left)s" % {'attr': attr, 'path_left': path_left}) res.update(data=json_body["data"], path=path) return res except (ValueError, jsonschema.ValidationError) as e: msg = _("Invalid json body: %s") % e.message raise webob.exc.HTTPBadRequest(explanation=msg) def upload(self, req): self._validate_headers(req, content_type='application/octet-stream') res = self._process_type_from_request(req) index = req.urlvars.get('path_left') try: # for blobs only one level of indexing is supported # (ex. bloblist/0) if index is not None: index = int(index) except ValueError: msg = _("Only list indexes are allowed for blob lists") raise webob.exc.HTTPBadRequest(explanation=msg) artifact_size = req.content_length or None res.update(size=artifact_size, data=req.body_file, index=index) return res def download(self, req): res = self._process_type_from_request(req) index = req.urlvars.get('index') if index is not None: index = int(index) res.update(index=index) return res def _validate_limit(self, limit): if limit is None: return self._max_limit_number try: limit = int(limit) except ValueError: msg = _("Limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("Limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) if limit > self._max_limit_number: msg = _("Limit param" " must not be higher than %d") % self._max_limit_number raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_sort_key(self, sort_key, artifact_type, type_version=None): if sort_key in self._available_sort_keys: return sort_key, None elif type_version is None: msg = (_('Invalid sort key: %(sort_key)s. 
' 'If type version is not set it must be one of' ' the following: %(available)s.') % {'sort_key': sort_key, 'available': ', '.join(self._available_sort_keys)}) raise webob.exc.HTTPBadRequest(explanation=msg) prop_type = artifact_type.metadata.attributes.all.get(sort_key) if prop_type is None or prop_type.DB_TYPE not in ['string', 'numeric', 'int', 'bool']: msg = (_('Invalid sort key: %(sort_key)s. ' 'You cannot sort by this property') % {'sort_key': sort_key}) raise webob.exc.HTTPBadRequest(explanation=msg) return sort_key, prop_type.DB_TYPE def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_sorting_params(self, params, artifact_type, type_version=None): sort_keys = [] sort_dirs = [] if 'sort' in params: for sort_param in params.pop('sort').strip().split(','): key, _sep, dir = sort_param.partition(':') if not dir: dir = self._default_sort_dir sort_keys.append(self._validate_sort_key(key.strip(), artifact_type, type_version)) sort_dirs.append(self._validate_sort_dir(dir.strip())) if not sort_keys: sort_keys = [('created_at', None)] if not sort_dirs: sort_dirs = [self._default_sort_dir] return sort_keys, sort_dirs def _bring_to_type(self, type_name, value): mapper = {'int': int, 'string': str, 'text': str, 'bool': bool, 'numeric': float} return mapper[type_name](value) def _get_filters(self, artifact_type, params): error_msg = 'Unexpected filter property' filters = dict() for filter, raw_value in params.items(): # first, get the comparison operator left, sep, right = raw_value.strip().partition(':') if not sep: op = "default" value = left.strip() else: op = left.strip().upper() value = right.strip() # then, understand what's the property to filter and its value if '.' 
in filter: # Indicates a dict-valued property with a key prop_name, key = filter.split('.', 1) else: prop_name = filter key = None prop_type = artifact_type.metadata.attributes.all.get(prop_name) if prop_type is None: raise webob.exc.HTTPBadRequest(error_msg) key_only_check = False position = None if isinstance(prop_type, dict): if key is None: key = value val = None key_only_check = True else: val = value if isinstance(prop_type.properties, dict): # This one is to handle the case of composite dict, having # different types of values at different keys, i.e. object prop_type = prop_type.properties.get(key) if prop_type is None: raise webob.exc.HTTPBadRequest(error_msg) else: prop_type = prop_type.properties property_name = prop_name + '.' + key property_value = val else: if key is not None: raise webob.exc.HTTPBadRequest(error_msg) property_name = prop_name property_value = value # now detect the value DB type if prop_type.DB_TYPE is not None: str_type = prop_type.DB_TYPE elif isinstance(prop_type, list): if not isinstance(prop_type.item_type, list): position = "any" str_type = prop_type.item_type.DB_TYPE else: raise webob.exc.HTTPBadRequest('Filtering by tuple-like' ' fields is not supported') else: raise webob.exc.HTTPBadRequest(error_msg) if property_value is not None: property_value = self._bring_to_type(str_type, property_value) # convert the default operation to NE, EQ or IN if key_only_check: if op == 'default': op = 'NE' else: raise webob.exc.HTTPBadRequest('Comparison not supported ' 'for key-only filtering') else: if op == 'default': op = 'IN' if isinstance(prop_type, list) else 'EQ' filters.setdefault(property_name, []) filters[property_name].append(dict(operator=op, position=position, value=property_value, type=str_type)) return filters def list(self, req): res = self._process_type_from_request(req, True) params = req.params.copy() show_level = params.pop('show_level', None) if show_level is not None: res['show_level'] = 
self._validate_show_level(show_level.strip()) limit = params.pop('limit', None) marker = params.pop('marker', None) query_params = dict() query_params['sort_keys'], query_params['sort_dirs'] = ( self._get_sorting_params(params, res['artifact_type'], res['type_version'])) if marker is not None: query_params['marker'] = marker query_params['limit'] = self._validate_limit(limit) query_params['filters'] = self._get_filters(res['artifact_type'], params) query_params['type_name'] = res['artifact_type'].metadata.type_name return query_params def list_artifact_types(self, req): return {} class ResponseSerializer(wsgi.JSONResponseSerializer): # TODO(ivasilevskaya): ideally this should be autogenerated/loaded ARTIFACTS_ENDPOINT = '/v0.1/artifacts' fields = ['id', 'name', 'version', 'type_name', 'type_version', 'visibility', 'state', 'owner', 'scope', 'created_at', 'updated_at', 'tags', 'dependencies', 'blobs', 'properties'] def __init__(self, schema=None): super(ResponseSerializer, self).__init__() def default(self, response, res): artifact = serialization.serialize_for_client( res, show_level=Showlevel.DIRECT) body = json.dumps(artifact, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create(self, response, artifact): response.status_int = 201 self.default(response, artifact) response.location = ( '%(root_url)s/%(type_name)s/v%(type_version)s/%(id)s' % dict( root_url=ResponseSerializer.ARTIFACTS_ENDPOINT, type_name=artifact.metadata.endpoint, type_version=artifact.metadata.type_version, id=artifact.id)) def list(self, response, res): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) type_name = response.request.urlvars.get('type_name') type_version = response.request.urlvars.get('type_version') if response.request.urlvars.get('state') == 'creating': drafts = "/drafts" else: drafts = "" artifacts_list = [ serialization.serialize_for_client(a, 
show_level=Showlevel.NONE) for a in res['artifacts']] url = "/v0.1/artifacts" if type_name: url += "/" + type_name if type_version: url += "/v" + type_version url += drafts if query: first_url = url + "?" + query else: first_url = url body = { "artifacts": artifacts_list, "first": first_url } if 'next_marker' in res: params['marker'] = res['next_marker'] next_query = urlparse.urlencode(params) body['next'] = url + '?' + next_query content = json.dumps(body, ensure_ascii=False) response.unicode_body = six.text_type(content) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def download(self, response, blob): response.headers['Content-Type'] = 'application/octet-stream' response.app_iter = iter(blob.data_stream) if blob.checksum: response.headers['Content-MD5'] = blob.checksum response.headers['Content-Length'] = str(blob.size) def list_artifact_types(self, response, res): body = json.dumps(res, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create_resource(): """Images resource factory method""" plugins = loader.ArtifactsPluginLoader('glance.artifacts.types') deserializer = RequestDeserializer(plugins=plugins) serializer = ResponseSerializer() controller = ArtifactsController(plugins=plugins) return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/property_protections.py0000664000567000056710000001175012701407047023063 0ustar jenkinsjenkins00000000000000# Copyright 2013 Rackspace # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class ProtectedImageRepoProxy(glance.domain.proxy.Repo):
    """Image-repo proxy that wraps every image it returns in a
    ProtectedImageProxy so property-protection rules are enforced.
    """

    def __init__(self, image_repo, context, property_rules):
        self.context = context
        self.image_repo = image_repo
        self.property_rules = property_rules
        proxy_kwargs = {'context': self.context}
        super(ProtectedImageRepoProxy, self).__init__(
            image_repo,
            item_proxy_class=ProtectedImageProxy,
            item_proxy_kwargs=proxy_kwargs)

    def get(self, image_id):
        # Wrap explicitly (rather than relying on the base proxy) so that
        # property_rules is threaded through to the image proxy - the base
        # proxy_kwargs above only carries the context.
        return ProtectedImageProxy(self.image_repo.get(image_id),
                                   self.context, self.property_rules)

    def list(self, *args, **kwargs):
        # Same as get(): wrap each listed image with the rules applied.
        images = self.image_repo.list(*args, **kwargs)
        return [ProtectedImageProxy(image, self.context, self.property_rules)
                for image in images]
self).__init__(self.image) class ExtraPropertiesProxy(glance.domain.ExtraProperties): def __init__(self, context, extra_props, property_rules): self.context = context self.property_rules = property_rules extra_properties = {} for key in extra_props.keys(): if self.property_rules.check_property_rules(key, 'read', self.context): extra_properties[key] = extra_props[key] super(ExtraPropertiesProxy, self).__init__(extra_properties) def __getitem__(self, key): if self.property_rules.check_property_rules(key, 'read', self.context): return dict.__getitem__(self, key) else: raise KeyError def __setitem__(self, key, value): # NOTE(isethi): Exceptions are raised only for actions update, delete # and create, where the user proactively interacts with the properties. # A user cannot request to read a specific property, hence reads do # raise an exception try: if self.__getitem__(key) is not None: if self.property_rules.check_property_rules(key, 'update', self.context): return dict.__setitem__(self, key, value) else: raise exception.ReservedProperty(property=key) except KeyError: if self.property_rules.check_property_rules(key, 'create', self.context): return dict.__setitem__(self, key, value) else: raise exception.ReservedProperty(property=key) def __delitem__(self, key): if key not in super(ExtraPropertiesProxy, self).keys(): raise KeyError if self.property_rules.check_property_rules(key, 'delete', self.context): return dict.__delitem__(self, key) else: raise exception.ReservedProperty(property=key) glance-12.0.0/glance/api/policy.py0000664000567000056710000006146612701407047020056 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Glance""" import copy from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy from glance.common import exception import glance.domain.proxy from glance.i18n import _ LOG = logging.getLogger(__name__) CONF = cfg.CONF DEFAULT_RULES = policy.Rules.from_dict({ 'context_is_admin': 'role:admin', 'default': '@', 'manage_image_cache': 'role:admin', }) class Enforcer(policy.Enforcer): """Responsible for loading and enforcing rules""" def __init__(self): if CONF.find_file(CONF.oslo_policy.policy_file): kwargs = dict(rules=None, use_conf=True) else: kwargs = dict(rules=DEFAULT_RULES, use_conf=False) super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs) def add_rules(self, rules): """Add new rules to the Rules object""" self.set_rules(rules, overwrite=False, use_conf=self.use_conf) def enforce(self, context, action, target): """Verifies that the action is valid on the target in this context. :param context: Glance request context :param action: String representing the action to be checked :param target: Dictionary representing the object of the action. :raises: `glance.common.exception.Forbidden` :returns: A non-False value if access is allowed. """ credentials = { 'roles': context.roles, 'user': context.user, 'tenant': context.tenant, } return super(Enforcer, self).enforce(action, target, credentials, do_raise=True, exc=exception.Forbidden, action=action) def check(self, context, action, target): """Verifies that the action is valid on the target in this context. 
        :param context: Glance request context
        :param action: String representing the action to be checked
        :param target: Dictionary representing the object of the action.
        :returns: A non-False value if access is allowed.
        """
        credentials = {
            'roles': context.roles,
            'user': context.user,
            'tenant': context.tenant,
        }
        # NOTE(review): unlike enforce() above, no do_raise/exc kwargs are
        # passed, so this returns the evaluation result instead of raising
        # Forbidden on denial (oslo.policy defaults do_raise to False).
        return super(Enforcer, self).enforce(action, target, credentials)

    def check_is_admin(self, context):
        """Check if the given context is associated with an admin role,
        as defined via the 'context_is_admin' RBAC rule.

        :param context: Glance request context
        :returns: A non-False value if context role is admin.
        """
        return self.check(context, 'context_is_admin', context.to_dict())


class ImageRepoProxy(glance.domain.proxy.Repo):
    # Repository proxy that enforces image policies before delegating each
    # operation to the wrapped image repository.

    def __init__(self, image_repo, context, policy):
        self.context = context
        self.policy = policy
        self.image_repo = image_repo
        proxy_kwargs = {'context': self.context, 'policy': self.policy}
        super(ImageRepoProxy, self).__init__(image_repo,
                                             item_proxy_class=ImageProxy,
                                             item_proxy_kwargs=proxy_kwargs)

    def get(self, image_id):
        try:
            image = super(ImageRepoProxy, self).get(image_id)
        except exception.NotFound:
            # Enforce against an empty target before re-raising, so a caller
            # denied 'get_image' sees Forbidden rather than NotFound.
            self.policy.enforce(self.context, 'get_image', {})
            raise
        else:
            self.policy.enforce(self.context, 'get_image',
                                ImageTarget(image))
        return image

    def list(self, *args, **kwargs):
        self.policy.enforce(self.context, 'get_images', {})
        return super(ImageRepoProxy, self).list(*args, **kwargs)

    def save(self, image, from_state=None):
        self.policy.enforce(self.context, 'modify_image', image.target)
        return super(ImageRepoProxy, self).save(image, from_state=from_state)

    def add(self, image):
        self.policy.enforce(self.context, 'add_image', image.target)
        return super(ImageRepoProxy, self).add(image)


class ImageProxy(glance.domain.proxy.Image):
    # Image proxy that guards state-changing image operations with policy
    # checks; the ImageTarget built here is the policy target for each check.

    def __init__(self, image, context, policy):
        self.image = image
        self.target = ImageTarget(image)
        self.context = context
        self.policy = policy
        super(ImageProxy, self).__init__(image)

    @property
    def visibility(self):
        return
        self.image.visibility

    @visibility.setter
    def visibility(self, value):
        # Making an image public requires its own 'publicize_image' policy.
        if value == 'public':
            self.policy.enforce(self.context, 'publicize_image',
                                self.target)
        self.image.visibility = value

    @property
    def locations(self):
        # Wrap the locations list so per-operation policies apply to reads
        # and mutations of individual entries as well.
        return ImageLocationsProxy(self.image.locations,
                                   self.context, self.policy)

    @locations.setter
    def locations(self, value):
        if not isinstance(value, (list, ImageLocationsProxy)):
            raise exception.Invalid(_('Invalid locations: %s') % value)
        self.policy.enforce(self.context, 'set_image_location', self.target)
        new_locations = list(value)
        # If any existing location URL is absent from the new list, the
        # assignment implicitly deletes locations, so enforce that policy too.
        if (set([loc['url'] for loc in self.image.locations]) -
                set([loc['url'] for loc in new_locations])):
            self.policy.enforce(self.context, 'delete_image_location',
                                self.target)
        self.image.locations = new_locations

    def delete(self):
        self.policy.enforce(self.context, 'delete_image', self.target)
        return self.image.delete()

    def deactivate(self):
        LOG.debug('Attempting deactivate')
        target = ImageTarget(self.image)
        self.policy.enforce(self.context, 'deactivate', target=target)
        LOG.debug('Deactivate allowed, continue')
        self.image.deactivate()

    def reactivate(self):
        LOG.debug('Attempting reactivate')
        target = ImageTarget(self.image)
        self.policy.enforce(self.context, 'reactivate', target=target)
        LOG.debug('Reactivate allowed, continue')
        self.image.reactivate()

    def get_data(self, *args, **kwargs):
        self.policy.enforce(self.context, 'download_image', self.target)
        return self.image.get_data(*args, **kwargs)

    def set_data(self, *args, **kwargs):
        self.policy.enforce(self.context, 'upload_image', self.target)
        return self.image.set_data(*args, **kwargs)


class ImageMemberProxy(glance.domain.proxy.ImageMember):
    # Thin proxy: keeps context/policy for consistency with the other
    # proxies; no per-method checks are performed here.

    def __init__(self, image_member, context, policy):
        super(ImageMemberProxy, self).__init__(image_member)
        self.image_member = image_member
        self.context = context
        self.policy = policy


class ImageFactoryProxy(glance.domain.proxy.ImageFactory):

    def __init__(self, image_factory, context, policy):
        self.image_factory = image_factory
        self.context =
context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(ImageFactoryProxy, self).__init__(image_factory, proxy_class=ImageProxy, proxy_kwargs=proxy_kwargs) def new_image(self, **kwargs): if kwargs.get('visibility') == 'public': self.policy.enforce(self.context, 'publicize_image', {}) return super(ImageFactoryProxy, self).new_image(**kwargs) class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): def __init__(self, member_factory, context, policy): super(ImageMemberFactoryProxy, self).__init__( member_factory, proxy_class=ImageMemberProxy, proxy_kwargs={'context': context, 'policy': policy}) class ImageMemberRepoProxy(glance.domain.proxy.Repo): def __init__(self, member_repo, image, context, policy): self.member_repo = member_repo self.image = image self.target = ImageTarget(image) self.context = context self.policy = policy def add(self, member): self.policy.enforce(self.context, 'add_member', self.target) self.member_repo.add(member) def get(self, member_id): self.policy.enforce(self.context, 'get_member', self.target) return self.member_repo.get(member_id) def save(self, member, from_state=None): self.policy.enforce(self.context, 'modify_member', self.target) self.member_repo.save(member, from_state=from_state) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_members', self.target) return self.member_repo.list(*args, **kwargs) def remove(self, member): self.policy.enforce(self.context, 'delete_member', self.target) self.member_repo.remove(member) class ImageLocationsProxy(object): __hash__ = None def __init__(self, locations, context, policy): self.locations = locations self.context = context self.policy = policy def __copy__(self): return type(self)(self.locations, self.context, self.policy) def __deepcopy__(self, memo): # NOTE(zhiyan): Only copy location entries, others can be reused. 
        return type(self)(copy.deepcopy(self.locations, memo),
                          self.context, self.policy)

    def _get_checker(action, func_name):
        # Class-body factory: builds a method that enforces `action` and
        # then delegates to the wrapped locations object's `func_name`.
        def _checker(self, *args, **kwargs):
            self.policy.enforce(self.context, action, {})
            method = getattr(self.locations, func_name)
            return method(*args, **kwargs)
        return _checker

    # Read-style operations are gated by the 'get_image_location' policy.
    count = _get_checker('get_image_location', 'count')
    index = _get_checker('get_image_location', 'index')
    __getitem__ = _get_checker('get_image_location', '__getitem__')
    __contains__ = _get_checker('get_image_location', '__contains__')
    __len__ = _get_checker('get_image_location', '__len__')
    __cast = _get_checker('get_image_location', '__cast')
    __cmp__ = _get_checker('get_image_location', '__cmp__')
    __iter__ = _get_checker('get_image_location', '__iter__')

    # Additive mutations are gated by the 'set_image_location' policy.
    append = _get_checker('set_image_location', 'append')
    extend = _get_checker('set_image_location', 'extend')
    insert = _get_checker('set_image_location', 'insert')
    reverse = _get_checker('set_image_location', 'reverse')
    __iadd__ = _get_checker('set_image_location', '__iadd__')
    __setitem__ = _get_checker('set_image_location', '__setitem__')

    # Removals are gated by the 'delete_image_location' policy.
    pop = _get_checker('delete_image_location', 'pop')
    remove = _get_checker('delete_image_location', 'remove')
    __delitem__ = _get_checker('delete_image_location', '__delitem__')
    __delslice__ = _get_checker('delete_image_location', '__delslice__')

    # The factory is only needed while the class body executes.
    del _get_checker


class TaskProxy(glance.domain.proxy.Task):

    def __init__(self, task, context, policy):
        self.task = task
        self.context = context
        self.policy = policy
        super(TaskProxy, self).__init__(task)


class TaskStubProxy(glance.domain.proxy.TaskStub):

    def __init__(self, task_stub, context, policy):
        self.task_stub = task_stub
        self.context = context
        self.policy = policy
        super(TaskStubProxy, self).__init__(task_stub)


class TaskRepoProxy(glance.domain.proxy.TaskRepo):
    # Task repository proxy enforcing task policies before delegating.

    def __init__(self, task_repo, context, task_policy):
        self.context = context
        self.policy = task_policy
        self.task_repo = task_repo
        proxy_kwargs = {'context': self.context, 'policy': self.policy}
super(TaskRepoProxy, self).__init__(task_repo, task_proxy_class=TaskProxy, task_proxy_kwargs=proxy_kwargs) def get(self, task_id): self.policy.enforce(self.context, 'get_task', {}) return super(TaskRepoProxy, self).get(task_id) def add(self, task): self.policy.enforce(self.context, 'add_task', {}) super(TaskRepoProxy, self).add(task) def save(self, task): self.policy.enforce(self.context, 'modify_task', {}) super(TaskRepoProxy, self).save(task) class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo): def __init__(self, task_stub_repo, context, task_policy): self.context = context self.policy = task_policy self.task_stub_repo = task_stub_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(TaskStubRepoProxy, self).__init__(task_stub_repo, task_stub_proxy_class=TaskStubProxy, task_stub_proxy_kwargs=proxy_kwargs) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_tasks', {}) return super(TaskStubRepoProxy, self).list(*args, **kwargs) class TaskFactoryProxy(glance.domain.proxy.TaskFactory): def __init__(self, task_factory, context, policy): self.task_factory = task_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(TaskFactoryProxy, self).__init__( task_factory, task_proxy_class=TaskProxy, task_proxy_kwargs=proxy_kwargs) class ImageTarget(object): SENTINEL = object() def __init__(self, target): """Initialize the object :param target: Object being targeted """ self.target = target def __getitem__(self, key): """Return the value of 'key' from the target. If the target has the attribute 'key', return it. 
:param key: value to retrieve """ key = self.key_transforms(key) value = getattr(self.target, key, self.SENTINEL) if value is self.SENTINEL: extra_properties = getattr(self.target, 'extra_properties', None) if extra_properties is not None: value = extra_properties[key] else: value = None return value def key_transforms(self, key): if key == 'id': key = 'image_id' return key # Metadef Namespace classes class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace): def __init__(self, namespace, context, policy): self.namespace_input = namespace self.context = context self.policy = policy super(MetadefNamespaceProxy, self).__init__(namespace) class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo): def __init__(self, namespace_repo, context, namespace_policy): self.context = context self.policy = namespace_policy self.namespace_repo = namespace_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo, namespace_proxy_class=MetadefNamespaceProxy, namespace_proxy_kwargs=proxy_kwargs) def get(self, namespace): self.policy.enforce(self.context, 'get_metadef_namespace', {}) return super(MetadefNamespaceRepoProxy, self).get(namespace) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_metadef_namespaces', {}) return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs) def save(self, namespace): self.policy.enforce(self.context, 'modify_metadef_namespace', {}) return super(MetadefNamespaceRepoProxy, self).save(namespace) def add(self, namespace): self.policy.enforce(self.context, 'add_metadef_namespace', {}) return super(MetadefNamespaceRepoProxy, self).add(namespace) class MetadefNamespaceFactoryProxy( glance.domain.proxy.MetadefNamespaceFactory): def __init__(self, meta_namespace_factory, context, policy): self.meta_namespace_factory = meta_namespace_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 
'policy': self.policy} super(MetadefNamespaceFactoryProxy, self).__init__( meta_namespace_factory, meta_namespace_proxy_class=MetadefNamespaceProxy, meta_namespace_proxy_kwargs=proxy_kwargs) # Metadef Object classes class MetadefObjectProxy(glance.domain.proxy.MetadefObject): def __init__(self, meta_object, context, policy): self.meta_object = meta_object self.context = context self.policy = policy super(MetadefObjectProxy, self).__init__(meta_object) class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo): def __init__(self, object_repo, context, object_policy): self.context = context self.policy = object_policy self.object_repo = object_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefObjectRepoProxy, self).__init__(object_repo, object_proxy_class=MetadefObjectProxy, object_proxy_kwargs=proxy_kwargs) def get(self, namespace, object_name): self.policy.enforce(self.context, 'get_metadef_object', {}) return super(MetadefObjectRepoProxy, self).get(namespace, object_name) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_metadef_objects', {}) return super(MetadefObjectRepoProxy, self).list(*args, **kwargs) def save(self, meta_object): self.policy.enforce(self.context, 'modify_metadef_object', {}) return super(MetadefObjectRepoProxy, self).save(meta_object) def add(self, meta_object): self.policy.enforce(self.context, 'add_metadef_object', {}) return super(MetadefObjectRepoProxy, self).add(meta_object) class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory): def __init__(self, meta_object_factory, context, policy): self.meta_object_factory = meta_object_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefObjectFactoryProxy, self).__init__( meta_object_factory, meta_object_proxy_class=MetadefObjectProxy, meta_object_proxy_kwargs=proxy_kwargs) # Metadef ResourceType classes class 
MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType): def __init__(self, meta_resource_type, context, policy): self.meta_resource_type = meta_resource_type self.context = context self.policy = policy super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) class MetadefResourceTypeRepoProxy( glance.domain.proxy.MetadefResourceTypeRepo): def __init__(self, resource_type_repo, context, resource_type_policy): self.context = context self.policy = resource_type_policy self.resource_type_repo = resource_type_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefResourceTypeRepoProxy, self).__init__( resource_type_repo, resource_type_proxy_class=MetadefResourceTypeProxy, resource_type_proxy_kwargs=proxy_kwargs) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'list_metadef_resource_types', {}) return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs) def get(self, *args, **kwargs): self.policy.enforce(self.context, 'get_metadef_resource_type', {}) return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs) def add(self, resource_type): self.policy.enforce(self.context, 'add_metadef_resource_type_association', {}) return super(MetadefResourceTypeRepoProxy, self).add(resource_type) class MetadefResourceTypeFactoryProxy( glance.domain.proxy.MetadefResourceTypeFactory): def __init__(self, resource_type_factory, context, policy): self.resource_type_factory = resource_type_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefResourceTypeFactoryProxy, self).__init__( resource_type_factory, resource_type_proxy_class=MetadefResourceTypeProxy, resource_type_proxy_kwargs=proxy_kwargs) # Metadef namespace properties classes class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty): def __init__(self, namespace_property, context, policy): self.namespace_property = namespace_property self.context = context 
self.policy = policy super(MetadefPropertyProxy, self).__init__(namespace_property) class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo): def __init__(self, property_repo, context, object_policy): self.context = context self.policy = object_policy self.property_repo = property_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefPropertyRepoProxy, self).__init__( property_repo, property_proxy_class=MetadefPropertyProxy, property_proxy_kwargs=proxy_kwargs) def get(self, namespace, property_name): self.policy.enforce(self.context, 'get_metadef_property', {}) return super(MetadefPropertyRepoProxy, self).get(namespace, property_name) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_metadef_properties', {}) return super(MetadefPropertyRepoProxy, self).list( *args, **kwargs) def save(self, namespace_property): self.policy.enforce(self.context, 'modify_metadef_property', {}) return super(MetadefPropertyRepoProxy, self).save( namespace_property) def add(self, namespace_property): self.policy.enforce(self.context, 'add_metadef_property', {}) return super(MetadefPropertyRepoProxy, self).add( namespace_property) class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory): def __init__(self, namespace_property_factory, context, policy): self.namespace_property_factory = namespace_property_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefPropertyFactoryProxy, self).__init__( namespace_property_factory, property_proxy_class=MetadefPropertyProxy, property_proxy_kwargs=proxy_kwargs) # Metadef Tag classes class MetadefTagProxy(glance.domain.proxy.MetadefTag): def __init__(self, meta_tag, context, policy): self.context = context self.policy = policy super(MetadefTagProxy, self).__init__(meta_tag) class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo): def __init__(self, tag_repo, context, tag_policy): 
self.context = context self.policy = tag_policy self.tag_repo = tag_repo proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefTagRepoProxy, self).__init__(tag_repo, tag_proxy_class=MetadefTagProxy, tag_proxy_kwargs=proxy_kwargs) def get(self, namespace, tag_name): self.policy.enforce(self.context, 'get_metadef_tag', {}) return super(MetadefTagRepoProxy, self).get(namespace, tag_name) def list(self, *args, **kwargs): self.policy.enforce(self.context, 'get_metadef_tags', {}) return super(MetadefTagRepoProxy, self).list(*args, **kwargs) def save(self, meta_tag): self.policy.enforce(self.context, 'modify_metadef_tag', {}) return super(MetadefTagRepoProxy, self).save(meta_tag) def add(self, meta_tag): self.policy.enforce(self.context, 'add_metadef_tag', {}) return super(MetadefTagRepoProxy, self).add(meta_tag) def add_tags(self, meta_tags): self.policy.enforce(self.context, 'add_metadef_tags', {}) return super(MetadefTagRepoProxy, self).add_tags(meta_tags) class MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory): def __init__(self, meta_tag_factory, context, policy): self.meta_tag_factory = meta_tag_factory self.context = context self.policy = policy proxy_kwargs = {'context': self.context, 'policy': self.policy} super(MetadefTagFactoryProxy, self).__init__( meta_tag_factory, meta_tag_proxy_class=MetadefTagProxy, meta_tag_proxy_kwargs=proxy_kwargs) glance-12.0.0/glance/api/versions.py0000664000567000056710000000564612701407047020425 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import http_client
import webob.dec

from glance.common import wsgi
from glance.i18n import _

versions_opts = [
    cfg.StrOpt('public_endpoint',
               help=_('Public url to use for versions endpoint. The default '
                      'is None, which will use the request\'s host_url '
                      'attribute to populate the URL base. If Glance is '
                      'operating behind a proxy, you will want to change '
                      'this to represent the proxy\'s URL.')),
]

CONF = cfg.CONF
CONF.register_opts(versions_opts)


class Controller(object):

    """A wsgi controller that reports which API versions are supported."""

    def index(self, req, explicit=False):
        """Respond to a request for all OpenStack API versions.

        :param req: the incoming webob request
        :param explicit: when True respond 200 OK; otherwise respond
                         300 Multiple Choices (version negotiation)
        :returns: a webob.Response whose JSON body lists the enabled API
                  versions with self links
        """
        def build_version_object(version, path, status):
            # Prefer the operator-configured public endpoint (e.g. when
            # behind a proxy); otherwise derive the base URL from the
            # request itself.
            url = CONF.public_endpoint or req.host_url
            return {
                'id': 'v%s' % version,
                'status': status,
                'links': [
                    {
                        'rel': 'self',
                        'href': '%s/%s/' % (url, path),
                    },
                ],
            }

        version_objs = []
        if CONF.enable_v2_api:
            version_objs.extend([
                build_version_object(2.3, 'v2', 'CURRENT'),
                build_version_object(2.2, 'v2', 'SUPPORTED'),
                build_version_object(2.1, 'v2', 'SUPPORTED'),
                build_version_object(2.0, 'v2', 'SUPPORTED'),
            ])
        if CONF.enable_v1_api:
            version_objs.extend([
                build_version_object(1.1, 'v1', 'SUPPORTED'),
                build_version_object(1.0, 'v1', 'SUPPORTED'),
            ])

        status = explicit and http_client.OK or http_client.MULTIPLE_CHOICES
        response = webob.Response(request=req, status=status,
                                  content_type='application/json')
        response.body = jsonutils.dump_as_bytes(dict(versions=version_objs))
        return response
@webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.index(req) def create_resource(conf): return wsgi.Resource(Controller()) glance-12.0.0/glance/api/cached_images.py0000664000567000056710000000721112701407047021277 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Controller for Image Cache Management API """ from oslo_log import log as logging import webob.exc from glance.api import policy from glance.api.v1 import controller from glance.common import exception from glance.common import wsgi from glance import image_cache LOG = logging.getLogger(__name__) class Controller(controller.BaseController): """ A controller for managing cached images. """ def __init__(self): self.cache = image_cache.ImageCache() self.policy = policy.Enforcer() def _enforce(self, req): """Authorize request against 'manage_image_cache' policy""" try: self.policy.enforce(req.context, 'manage_image_cache', {}) except exception.Forbidden: LOG.debug("User not permitted to manage the image cache") raise webob.exc.HTTPForbidden() def get_cached_images(self, req): """ GET /cached_images Returns a mapping of records about cached images. """ self._enforce(req) images = self.cache.get_cached_images() return dict(cached_images=images) def delete_cached_image(self, req, image_id): """ DELETE /cached_images/ Removes an image from the cache. 
""" self._enforce(req) self.cache.delete_cached_image(image_id) def delete_cached_images(self, req): """ DELETE /cached_images - Clear all active cached images Removes all images from the cache. """ self._enforce(req) return dict(num_deleted=self.cache.delete_all_cached_images()) def get_queued_images(self, req): """ GET /queued_images Returns a mapping of records about queued images. """ self._enforce(req) images = self.cache.get_queued_images() return dict(queued_images=images) def queue_image(self, req, image_id): """ PUT /queued_images/ Queues an image for caching. We do not check to see if the image is in the registry here. That is done by the prefetcher... """ self._enforce(req) self.cache.queue_image(image_id) def delete_queued_image(self, req, image_id): """ DELETE /queued_images/ Removes an image from the cache. """ self._enforce(req) self.cache.delete_queued_image(image_id) def delete_queued_images(self, req): """ DELETE /queued_images - Clear all active queued images Removes all images from the cache. """ self._enforce(req) return dict(num_deleted=self.cache.delete_all_queued_images()) class CachedImageDeserializer(wsgi.JSONRequestDeserializer): pass class CachedImageSerializer(wsgi.JSONResponseSerializer): pass def create_resource(): """Cached Images resource factory method""" deserializer = CachedImageDeserializer() serializer = CachedImageSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/api/__init__.py0000664000567000056710000000176112701407047020306 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import paste.urlmap CONF = cfg.CONF def root_app_factory(loader, global_conf, **local_conf): if not CONF.enable_v1_api and '/v1' in local_conf: del local_conf['/v1'] if not CONF.enable_v2_api and '/v2' in local_conf: del local_conf['/v2'] return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) glance-12.0.0/glance/api/common.py0000664000567000056710000001702612701407047020040 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from glance.common import exception from glance.common import wsgi from glance.i18n import _, _LE, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF _CACHED_THREAD_POOL = {} def size_checked_iter(response, image_meta, expected_size, image_iter, notifier): image_id = image_meta['id'] bytes_written = 0 def notify_image_sent_hook(env): image_send_notification(bytes_written, expected_size, image_meta, response.request, notifier) # Add hook to process after response is fully sent if 'eventlet.posthooks' in response.request.environ: response.request.environ['eventlet.posthooks'].append( (notify_image_sent_hook, (), {})) try: for chunk in image_iter: yield chunk bytes_written += len(chunk) except Exception as err: with excutils.save_and_reraise_exception(): msg = (_LE("An error occurred reading from backend storage for " "image %(image_id)s: %(err)s") % {'image_id': image_id, 'err': err}) LOG.error(msg) if expected_size != bytes_written: msg = (_LE("Backend storage for image %(image_id)s " "disconnected after writing only %(bytes_written)d " "bytes") % {'image_id': image_id, 'bytes_written': bytes_written}) LOG.error(msg) raise exception.GlanceException(_("Corrupt image download for " "image %(image_id)s") % {'image_id': image_id}) def image_send_notification(bytes_written, expected_size, image_meta, request, notifier): """Send an image.send message to the notifier.""" try: context = request.context payload = { 'bytes_sent': bytes_written, 'image_id': image_meta['id'], 'owner_id': image_meta['owner'], 'receiver_tenant_id': context.tenant, 'receiver_user_id': context.user, 'destination_ip': request.remote_addr, } if bytes_written != expected_size: notify = notifier.error else: notify = notifier.info notify('image.send', payload) except Exception as err: msg = (_LE("An error occurred during 
image.send" " notification: %(err)s") % {'err': err}) LOG.error(msg) def get_remaining_quota(context, db_api, image_id=None): """Method called to see if the user is allowed to store an image. Checks if it is allowed based on the given size in glance based on their quota and current usage. :param context: :param db_api: The db_api in use for this configuration :param image_id: The image that will be replaced with this new data size :returns: The number of bytes the user has remaining under their quota. None means infinity """ # NOTE(jbresnah) in the future this value will come from a call to # keystone. users_quota = CONF.user_storage_quota # set quota must have a number optionally followed by B, KB, MB, # GB or TB without any spaces in between pattern = re.compile('^(\d+)((K|M|G|T)?B)?$') match = pattern.match(users_quota) if not match: LOG.error(_LE("Invalid value for option user_storage_quota: " "%(users_quota)s") % {'users_quota': users_quota}) raise exception.InvalidOptionValue(option='user_storage_quota', value=users_quota) quota_value, quota_unit = (match.groups())[0:2] # fall back to Bytes if user specified anything other than # permitted values quota_unit = quota_unit or "B" factor = getattr(units, quota_unit.replace('B', 'i'), 1) users_quota = int(quota_value) * factor if users_quota <= 0: return usage = db_api.user_get_storage_usage(context, context.owner, image_id=image_id) return users_quota - usage def check_quota(context, image_size, db_api, image_id=None): """Method called to see if the user is allowed to store an image. Checks if it is allowed based on the given size in glance based on their quota and current usage. 
:param context: :param image_size: The size of the image we hope to store :param db_api: The db_api in use for this configuration :param image_id: The image that will be replaced with this new data size :returns: """ remaining = get_remaining_quota(context, db_api, image_id=image_id) if remaining is None: return user = getattr(context, 'user', '') if image_size is None: # NOTE(jbresnah) When the image size is None it means that it is # not known. In this case the only time we will raise an # exception is when there is no room left at all, thus we know # it will not fit if remaining <= 0: LOG.warn(_LW("User %(user)s attempted to upload an image of" " unknown size that will exceed the quota." " %(remaining)d bytes remaining.") % {'user': user, 'remaining': remaining}) raise exception.StorageQuotaFull(image_size=image_size, remaining=remaining) return if image_size > remaining: LOG.warn(_LW("User %(user)s attempted to upload an image of size" " %(size)d that will exceed the quota. %(remaining)d" " bytes remaining.") % {'user': user, 'size': image_size, 'remaining': remaining}) raise exception.StorageQuotaFull(image_size=image_size, remaining=remaining) return remaining def memoize(lock_name): def memoizer_wrapper(func): @lockutils.synchronized(lock_name) def memoizer(lock_name): if lock_name not in _CACHED_THREAD_POOL: _CACHED_THREAD_POOL[lock_name] = func() return _CACHED_THREAD_POOL[lock_name] return memoizer(lock_name) return memoizer_wrapper def get_thread_pool(lock_name, size=1024): """Initializes eventlet thread pool. If thread pool is present in cache, then returns it from cache else create new pool, stores it in cache and return newly created pool. @param lock_name: Name of the lock. @param size: Size of eventlet pool. 
@return: eventlet pool """ @memoize(lock_name) def _get_thread_pool(): return wsgi.get_asynchronous_eventlet_pool(size=size) return _get_thread_pool glance-12.0.0/glance/api/authorization.py0000664000567000056710000007462112701407047021454 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from glance.common import exception import glance.domain.proxy from glance.i18n import _ def is_image_mutable(context, image): """Return True if the image is mutable in this context.""" if context.is_admin: return True if image.owner is None or context.owner is None: return False return image.owner == context.owner def proxy_image(context, image): if is_image_mutable(context, image): return ImageProxy(image, context) else: return ImmutableImageProxy(image, context) def is_member_mutable(context, member): """Return True if the image is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return member.member_id == context.owner def proxy_member(context, member): if is_member_mutable(context, member): return member else: return ImmutableMemberProxy(member) def is_task_mutable(context, task): """Return True if the task is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return task.owner == context.owner def is_task_stub_mutable(context, task_stub): """Return True if the 
task stub is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return task_stub.owner == context.owner def proxy_task(context, task): if is_task_mutable(context, task): return task else: return ImmutableTaskProxy(task) def proxy_task_stub(context, task_stub): if is_task_stub_mutable(context, task_stub): return task_stub else: return ImmutableTaskStubProxy(task_stub) class ImageRepoProxy(glance.domain.proxy.Repo): def __init__(self, image_repo, context): self.context = context self.image_repo = image_repo proxy_kwargs = {'context': self.context} super(ImageRepoProxy, self).__init__(image_repo, item_proxy_class=ImageProxy, item_proxy_kwargs=proxy_kwargs) def get(self, image_id): image = self.image_repo.get(image_id) return proxy_image(self.context, image) def list(self, *args, **kwargs): images = self.image_repo.list(*args, **kwargs) return [proxy_image(self.context, i) for i in images] class ImageMemberRepoProxy(glance.domain.proxy.MemberRepo): def __init__(self, member_repo, image, context): self.member_repo = member_repo self.image = image self.context = context proxy_kwargs = {'context': self.context} super(ImageMemberRepoProxy, self).__init__( image, member_repo, member_proxy_class=ImageMemberProxy, member_proxy_kwargs=proxy_kwargs) self._check_image_visibility() def _check_image_visibility(self): if self.image.visibility == 'public': message = _("Public images do not have members.") raise exception.Forbidden(message) def get(self, member_id): if (self.context.is_admin or self.context.owner in (self.image.owner, member_id)): member = self.member_repo.get(member_id) return proxy_member(self.context, member) else: message = _("You cannot get image member for %s") raise exception.Forbidden(message % member_id) def list(self, *args, **kwargs): members = self.member_repo.list(*args, **kwargs) if (self.context.is_admin or self.context.owner == self.image.owner): return [proxy_member(self.context, m) for m in members] 
for member in members: if member.member_id == self.context.owner: return [proxy_member(self.context, member)] message = _("You cannot get image member for %s") raise exception.Forbidden(message % self.image.image_id) def remove(self, image_member): if (self.image.owner == self.context.owner or self.context.is_admin): self.member_repo.remove(image_member) else: message = _("You cannot delete image member for %s") raise exception.Forbidden(message % self.image.image_id) def add(self, image_member): if (self.image.owner == self.context.owner or self.context.is_admin): self.member_repo.add(image_member) else: message = _("You cannot add image member for %s") raise exception.Forbidden(message % self.image.image_id) def save(self, image_member, from_state=None): if (self.context.is_admin or self.context.owner == image_member.member_id): self.member_repo.save(image_member, from_state=from_state) else: message = _("You cannot update image member %s") raise exception.Forbidden(message % image_member.member_id) class ImageFactoryProxy(glance.domain.proxy.ImageFactory): def __init__(self, image_factory, context): self.image_factory = image_factory self.context = context kwargs = {'context': self.context} super(ImageFactoryProxy, self).__init__(image_factory, proxy_class=ImageProxy, proxy_kwargs=kwargs) def new_image(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create images " "owned by '%s'.") raise exception.Forbidden(message % owner) return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs) class ImageMemberFactoryProxy(glance.domain.proxy.ImageMembershipFactory): def __init__(self, image_member_factory, context): self.image_member_factory = image_member_factory self.context = context kwargs = {'context': self.context} super(ImageMemberFactoryProxy, self).__init__( image_member_factory, proxy_class=ImageMemberProxy, 
proxy_kwargs=kwargs) def new_image_member(self, image, member_id): owner = image.owner if not self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create image members " "for the image.") raise exception.Forbidden(message) if image.visibility == 'public': message = _("Public images do not have members.") raise exception.Forbidden(message) return self.image_member_factory.new_image_member(image, member_id) def _immutable_attr(target, attr, proxy=None): def get_attr(self): value = getattr(getattr(self, target), attr) if proxy is not None: value = proxy(value) return value def forbidden(self, *args, **kwargs): resource = getattr(self, 'resource_name', 'resource') message = _("You are not permitted to modify '%(attr)s' on this " "%(resource)s.") raise exception.Forbidden(message % {'attr': attr, 'resource': resource}) return property(get_attr, forbidden, forbidden) class ImmutableLocations(list): def forbidden(self, *args, **kwargs): message = _("You are not permitted to modify locations " "for this image.") raise exception.Forbidden(message) def __deepcopy__(self, memo): return ImmutableLocations(copy.deepcopy(list(self), memo)) append = forbidden extend = forbidden insert = forbidden pop = forbidden remove = forbidden reverse = forbidden sort = forbidden __delitem__ = forbidden __delslice__ = forbidden __iadd__ = forbidden __imul__ = forbidden __setitem__ = forbidden __setslice__ = forbidden class ImmutableProperties(dict): def forbidden_key(self, key, *args, **kwargs): message = _("You are not permitted to modify '%s' on this image.") raise exception.Forbidden(message % key) def forbidden(self, *args, **kwargs): message = _("You are not permitted to modify this image.") raise exception.Forbidden(message) __delitem__ = forbidden_key __setitem__ = forbidden_key pop = forbidden popitem = forbidden setdefault = forbidden update = forbidden class ImmutableTags(set): def forbidden(self, *args, **kwargs): message = 
_("You are not permitted to modify tags on this image.") raise exception.Forbidden(message) add = forbidden clear = forbidden difference_update = forbidden intersection_update = forbidden pop = forbidden remove = forbidden symmetric_difference_update = forbidden update = forbidden class ImmutableImageProxy(object): def __init__(self, base, context): self.base = base self.context = context self.resource_name = 'image' name = _immutable_attr('base', 'name') image_id = _immutable_attr('base', 'image_id') status = _immutable_attr('base', 'status') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') visibility = _immutable_attr('base', 'visibility') min_disk = _immutable_attr('base', 'min_disk') min_ram = _immutable_attr('base', 'min_ram') protected = _immutable_attr('base', 'protected') locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations) checksum = _immutable_attr('base', 'checksum') owner = _immutable_attr('base', 'owner') disk_format = _immutable_attr('base', 'disk_format') container_format = _immutable_attr('base', 'container_format') size = _immutable_attr('base', 'size') virtual_size = _immutable_attr('base', 'virtual_size') extra_properties = _immutable_attr('base', 'extra_properties', proxy=ImmutableProperties) tags = _immutable_attr('base', 'tags', proxy=ImmutableTags) def delete(self): message = _("You are not permitted to delete this image.") raise exception.Forbidden(message) def get_data(self, *args, **kwargs): return self.base.get_data(*args, **kwargs) def set_data(self, *args, **kwargs): message = _("You are not permitted to upload data for this image.") raise exception.Forbidden(message) def deactivate(self, *args, **kwargs): message = _("You are not permitted to deactivate this image.") raise exception.Forbidden(message) def reactivate(self, *args, **kwargs): message = _("You are not permitted to reactivate this image.") raise exception.Forbidden(message) class 
ImmutableMemberProxy(object): def __init__(self, base): self.base = base self.resource_name = 'image member' id = _immutable_attr('base', 'id') image_id = _immutable_attr('base', 'image_id') member_id = _immutable_attr('base', 'member_id') status = _immutable_attr('base', 'status') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') class ImmutableTaskProxy(object): def __init__(self, base): self.base = base self.resource_name = 'task' task_id = _immutable_attr('base', 'task_id') type = _immutable_attr('base', 'type') status = _immutable_attr('base', 'status') owner = _immutable_attr('base', 'owner') expires_at = _immutable_attr('base', 'expires_at') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') input = _immutable_attr('base', 'input') message = _immutable_attr('base', 'message') result = _immutable_attr('base', 'result') def run(self, executor): self.base.run(executor) def begin_processing(self): message = _("You are not permitted to set status on this task.") raise exception.Forbidden(message) def succeed(self, result): message = _("You are not permitted to set status on this task.") raise exception.Forbidden(message) def fail(self, message): message = _("You are not permitted to set status on this task.") raise exception.Forbidden(message) class ImmutableTaskStubProxy(object): def __init__(self, base): self.base = base self.resource_name = 'task stub' task_id = _immutable_attr('base', 'task_id') type = _immutable_attr('base', 'type') status = _immutable_attr('base', 'status') owner = _immutable_attr('base', 'owner') expires_at = _immutable_attr('base', 'expires_at') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') class ImageProxy(glance.domain.proxy.Image): def __init__(self, image, context): self.image = image self.context = context super(ImageProxy, self).__init__(image) class 
ImageMemberProxy(glance.domain.proxy.ImageMember): def __init__(self, image_member, context): self.image_member = image_member self.context = context super(ImageMemberProxy, self).__init__(image_member) class TaskProxy(glance.domain.proxy.Task): def __init__(self, task): self.task = task super(TaskProxy, self).__init__(task) class TaskFactoryProxy(glance.domain.proxy.TaskFactory): def __init__(self, task_factory, context): self.task_factory = task_factory self.context = context super(TaskFactoryProxy, self).__init__( task_factory, task_proxy_class=TaskProxy) def new_task(self, **kwargs): owner = kwargs.get('owner', self.context.owner) # NOTE(nikhil): Unlike Images, Tasks are expected to have owner. # We currently do not allow even admins to set the owner to None. if owner is not None and (owner == self.context.owner or self.context.is_admin): return super(TaskFactoryProxy, self).new_task(**kwargs) else: message = _("You are not permitted to create this task with " "owner as: %s") raise exception.Forbidden(message % owner) class TaskRepoProxy(glance.domain.proxy.TaskRepo): def __init__(self, task_repo, context): self.task_repo = task_repo self.context = context super(TaskRepoProxy, self).__init__(task_repo) def get(self, task_id): task = self.task_repo.get(task_id) return proxy_task(self.context, task) class TaskStubRepoProxy(glance.domain.proxy.TaskStubRepo): def __init__(self, task_stub_repo, context): self.task_stub_repo = task_stub_repo self.context = context super(TaskStubRepoProxy, self).__init__(task_stub_repo) def list(self, *args, **kwargs): task_stubs = self.task_stub_repo.list(*args, **kwargs) return [proxy_task_stub(self.context, t) for t in task_stubs] # Metadef Namespace classes def is_namespace_mutable(context, namespace): """Return True if the namespace is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return namespace.owner == context.owner def proxy_namespace(context, namespace): if 
is_namespace_mutable(context, namespace): return namespace else: return ImmutableMetadefNamespaceProxy(namespace) class ImmutableMetadefNamespaceProxy(object): def __init__(self, base): self.base = base self.resource_name = 'namespace' namespace_id = _immutable_attr('base', 'namespace_id') namespace = _immutable_attr('base', 'namespace') display_name = _immutable_attr('base', 'display_name') description = _immutable_attr('base', 'description') owner = _immutable_attr('base', 'owner') visibility = _immutable_attr('base', 'visibility') protected = _immutable_attr('base', 'protected') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') def delete(self): message = _("You are not permitted to delete this namespace.") raise exception.Forbidden(message) def save(self): message = _("You are not permitted to update this namespace.") raise exception.Forbidden(message) class MetadefNamespaceProxy(glance.domain.proxy.MetadefNamespace): def __init__(self, namespace): self.namespace_input = namespace super(MetadefNamespaceProxy, self).__init__(namespace) class MetadefNamespaceFactoryProxy( glance.domain.proxy.MetadefNamespaceFactory): def __init__(self, meta_namespace_factory, context): self.meta_namespace_factory = meta_namespace_factory self.context = context super(MetadefNamespaceFactoryProxy, self).__init__( meta_namespace_factory, meta_namespace_proxy_class=MetadefNamespaceProxy) def new_namespace(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create namespace " "owned by '%s'") raise exception.Forbidden(message % (owner)) return super(MetadefNamespaceFactoryProxy, self).new_namespace( owner=owner, **kwargs) class MetadefNamespaceRepoProxy(glance.domain.proxy.MetadefNamespaceRepo): def __init__(self, namespace_repo, context): self.namespace_repo = namespace_repo self.context = context 
super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo) def get(self, namespace): namespace_obj = self.namespace_repo.get(namespace) return proxy_namespace(self.context, namespace_obj) def list(self, *args, **kwargs): namespaces = self.namespace_repo.list(*args, **kwargs) return [proxy_namespace(self.context, namespace) for namespace in namespaces] # Metadef Object classes def is_object_mutable(context, object): """Return True if the object is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return object.namespace.owner == context.owner def proxy_object(context, object): if is_object_mutable(context, object): return object else: return ImmutableMetadefObjectProxy(object) class ImmutableMetadefObjectProxy(object): def __init__(self, base): self.base = base self.resource_name = 'object' object_id = _immutable_attr('base', 'object_id') name = _immutable_attr('base', 'name') required = _immutable_attr('base', 'required') description = _immutable_attr('base', 'description') properties = _immutable_attr('base', 'properties') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') def delete(self): message = _("You are not permitted to delete this object.") raise exception.Forbidden(message) def save(self): message = _("You are not permitted to update this object.") raise exception.Forbidden(message) class MetadefObjectProxy(glance.domain.proxy.MetadefObject): def __init__(self, meta_object): self.meta_object = meta_object super(MetadefObjectProxy, self).__init__(meta_object) class MetadefObjectFactoryProxy(glance.domain.proxy.MetadefObjectFactory): def __init__(self, meta_object_factory, context): self.meta_object_factory = meta_object_factory self.context = context super(MetadefObjectFactoryProxy, self).__init__( meta_object_factory, meta_object_proxy_class=MetadefObjectProxy) def new_object(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not 
self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create object " "owned by '%s'") raise exception.Forbidden(message % (owner)) return super(MetadefObjectFactoryProxy, self).new_object(**kwargs) class MetadefObjectRepoProxy(glance.domain.proxy.MetadefObjectRepo): def __init__(self, object_repo, context): self.object_repo = object_repo self.context = context super(MetadefObjectRepoProxy, self).__init__(object_repo) def get(self, namespace, object_name): meta_object = self.object_repo.get(namespace, object_name) return proxy_object(self.context, meta_object) def list(self, *args, **kwargs): objects = self.object_repo.list(*args, **kwargs) return [proxy_object(self.context, meta_object) for meta_object in objects] # Metadef ResourceType classes def is_meta_resource_type_mutable(context, meta_resource_type): """Return True if the meta_resource_type is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False # (lakshmiS): resource type can exist without an association with # namespace and resource type cannot be created/update/deleted directly( # they have to be associated/de-associated from namespace) if meta_resource_type.namespace: return meta_resource_type.namespace.owner == context.owner else: return False def proxy_meta_resource_type(context, meta_resource_type): if is_meta_resource_type_mutable(context, meta_resource_type): return meta_resource_type else: return ImmutableMetadefResourceTypeProxy(meta_resource_type) class ImmutableMetadefResourceTypeProxy(object): def __init__(self, base): self.base = base self.resource_name = 'meta_resource_type' namespace = _immutable_attr('base', 'namespace') name = _immutable_attr('base', 'name') prefix = _immutable_attr('base', 'prefix') properties_target = _immutable_attr('base', 'properties_target') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') def delete(self): 
message = _("You are not permitted to delete this meta_resource_type.") raise exception.Forbidden(message) class MetadefResourceTypeProxy(glance.domain.proxy.MetadefResourceType): def __init__(self, meta_resource_type): self.meta_resource_type = meta_resource_type super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) class MetadefResourceTypeFactoryProxy( glance.domain.proxy.MetadefResourceTypeFactory): def __init__(self, resource_type_factory, context): self.meta_resource_type_factory = resource_type_factory self.context = context super(MetadefResourceTypeFactoryProxy, self).__init__( resource_type_factory, resource_type_proxy_class=MetadefResourceTypeProxy) def new_resource_type(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create resource_type " "owned by '%s'") raise exception.Forbidden(message % (owner)) return super(MetadefResourceTypeFactoryProxy, self).new_resource_type( **kwargs) class MetadefResourceTypeRepoProxy( glance.domain.proxy.MetadefResourceTypeRepo): def __init__(self, meta_resource_type_repo, context): self.meta_resource_type_repo = meta_resource_type_repo self.context = context super(MetadefResourceTypeRepoProxy, self).__init__( meta_resource_type_repo) def list(self, *args, **kwargs): meta_resource_types = self.meta_resource_type_repo.list( *args, **kwargs) return [proxy_meta_resource_type(self.context, meta_resource_type) for meta_resource_type in meta_resource_types] def get(self, *args, **kwargs): meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs) return proxy_meta_resource_type(self.context, meta_resource_type) # Metadef namespace properties classes def is_namespace_property_mutable(context, namespace_property): """Return True if the object is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return 
namespace_property.namespace.owner == context.owner def proxy_namespace_property(context, namespace_property): if is_namespace_property_mutable(context, namespace_property): return namespace_property else: return ImmutableMetadefPropertyProxy(namespace_property) class ImmutableMetadefPropertyProxy(object): def __init__(self, base): self.base = base self.resource_name = 'namespace_property' property_id = _immutable_attr('base', 'property_id') name = _immutable_attr('base', 'name') schema = _immutable_attr('base', 'schema') def delete(self): message = _("You are not permitted to delete this property.") raise exception.Forbidden(message) def save(self): message = _("You are not permitted to update this property.") raise exception.Forbidden(message) class MetadefPropertyProxy(glance.domain.proxy.MetadefProperty): def __init__(self, namespace_property): self.meta_object = namespace_property super(MetadefPropertyProxy, self).__init__(namespace_property) class MetadefPropertyFactoryProxy(glance.domain.proxy.MetadefPropertyFactory): def __init__(self, namespace_property_factory, context): self.meta_object_factory = namespace_property_factory self.context = context super(MetadefPropertyFactoryProxy, self).__init__( namespace_property_factory, property_proxy_class=MetadefPropertyProxy) def new_namespace_property(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not self.context.is_admin: if owner is None or owner != self.context.owner: message = _("You are not permitted to create property " "owned by '%s'") raise exception.Forbidden(message % (owner)) return super(MetadefPropertyFactoryProxy, self).new_namespace_property( **kwargs) class MetadefPropertyRepoProxy(glance.domain.proxy.MetadefPropertyRepo): def __init__(self, namespace_property_repo, context): self.namespace_property_repo = namespace_property_repo self.context = context super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo) def get(self, namespace, object_name): 
namespace_property = self.namespace_property_repo.get(namespace, object_name) return proxy_namespace_property(self.context, namespace_property) def list(self, *args, **kwargs): namespace_properties = self.namespace_property_repo.list( *args, **kwargs) return [proxy_namespace_property(self.context, namespace_property) for namespace_property in namespace_properties] # Metadef Tag classes def is_tag_mutable(context, tag): """Return True if the tag is mutable in this context.""" if context.is_admin: return True if context.owner is None: return False return tag.namespace.owner == context.owner def proxy_tag(context, tag): if is_tag_mutable(context, tag): return tag else: return ImmutableMetadefTagProxy(tag) class ImmutableMetadefTagProxy(object): def __init__(self, base): self.base = base self.resource_name = 'tag' tag_id = _immutable_attr('base', 'tag_id') name = _immutable_attr('base', 'name') created_at = _immutable_attr('base', 'created_at') updated_at = _immutable_attr('base', 'updated_at') def delete(self): message = _("You are not permitted to delete this tag.") raise exception.Forbidden(message) def save(self): message = _("You are not permitted to update this tag.") raise exception.Forbidden(message) class MetadefTagProxy(glance.domain.proxy.MetadefTag): pass class MetadefTagFactoryProxy(glance.domain.proxy.MetadefTagFactory): def __init__(self, meta_tag_factory, context): self.meta_tag_factory = meta_tag_factory self.context = context super(MetadefTagFactoryProxy, self).__init__( meta_tag_factory, meta_tag_proxy_class=MetadefTagProxy) def new_tag(self, **kwargs): owner = kwargs.pop('owner', self.context.owner) if not self.context.is_admin: if owner is None: message = _("Owner must be specified to create a tag.") raise exception.Forbidden(message) elif owner != self.context.owner: message = _("You are not permitted to create a tag" " in the namespace owned by '%s'") raise exception.Forbidden(message % (owner)) return super(MetadefTagFactoryProxy, 
self).new_tag(**kwargs) class MetadefTagRepoProxy(glance.domain.proxy.MetadefTagRepo): def __init__(self, tag_repo, context): self.tag_repo = tag_repo self.context = context super(MetadefTagRepoProxy, self).__init__(tag_repo) def get(self, namespace, tag_name): meta_tag = self.tag_repo.get(namespace, tag_name) return proxy_tag(self.context, meta_tag) def list(self, *args, **kwargs): tags = self.tag_repo.list(*args, **kwargs) return [proxy_tag(self.context, meta_tag) for meta_tag in tags] glance-12.0.0/glance/api/middleware/0000775000567000056710000000000012701407204020300 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/middleware/__init__.py0000664000567000056710000000000012701407047022404 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/middleware/cache.py0000664000567000056710000003136012701407047021725 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Transparent image file caching middleware, designed to live on Glance API nodes. When images are requested from the API node, this middleware caches the returned image file to local filesystem. When subsequent requests for the same image file are received, the local cached copy of the image file is returned. 
""" import re import six from oslo_log import log as logging import webob from glance.api.common import size_checked_iter from glance.api import policy from glance.api.v1 import images from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db from glance.i18n import _LE, _LI from glance import image_cache from glance import notifier import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) PATTERNS = { ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'), ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'), ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'), ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$') } class CacheFilter(wsgi.Middleware): def __init__(self, app): self.cache = image_cache.ImageCache() self.serializer = images.ImageSerializer() self.policy = policy.Enforcer() LOG.info(_LI("Initialized image cache middleware")) super(CacheFilter, self).__init__(app) def _verify_metadata(self, image_meta): """ Sanity check the 'deleted' and 'size' metadata values. """ # NOTE: admins can see image metadata in the v1 API, but shouldn't # be able to download the actual image data. if image_meta['status'] == 'deleted' and image_meta['deleted']: raise exception.NotFound() if not image_meta['size']: # override image size metadata with the actual cached # file size, see LP Bug #900959 image_meta['size'] = self.cache.get_image_size(image_meta['id']) @staticmethod def _match_request(request): """Determine the version of the url and extract the image id :returns: tuple of version and image id if the url is a cacheable, otherwise None """ for ((version, method), pattern) in PATTERNS.items(): if request.method != method: continue match = pattern.match(request.path_info) if match is None: continue image_id = match.group(1) # Ensure the image id we got looks like an image id to filter # out a URI like /images/detail. 
See LP Bug #879136 if image_id != 'detail': return (version, method, image_id) def _enforce(self, req, action, target=None): """Authorize an action against our policies""" if target is None: target = {} try: self.policy.enforce(req.context, action, target) except exception.Forbidden as e: LOG.debug("User not permitted to perform '%s' action", action) raise webob.exc.HTTPForbidden(explanation=e.msg, request=req) def _get_v1_image_metadata(self, request, image_id): """ Retrieves image metadata using registry for v1 api and creates dictionary-like mash-up of image core and custom properties. """ try: image_metadata = registry.get_image_metadata(request.context, image_id) return utils.create_mashup_dict(image_metadata) except exception.NotFound as e: LOG.debug("No metadata found for image '%s'", image_id) raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) def _get_v2_image_metadata(self, request, image_id): """ Retrieves image and for v2 api and creates adapter like object to access image core or custom properties on request. """ db_api = glance.db.get_api() image_repo = glance.db.ImageRepo(request.context, db_api) try: image = image_repo.get(image_id) # Storing image object in request as it is required in # _process_v2_request call. request.environ['api.cache.image'] = image return policy.ImageTarget(image) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) def process_request(self, request): """ For requests for an image file, we check the local image cache. If present, we return the image file, appending the image metadata in headers. If not present, we pass the request on to the next application in the pipeline. 
""" match = self._match_request(request) try: (version, method, image_id) = match except TypeError: # Trying to unpack None raises this exception return None self._stash_request_info(request, image_id, method, version) if request.method != 'GET' or not self.cache.is_cached(image_id): return None method = getattr(self, '_get_%s_image_metadata' % version) image_metadata = method(request, image_id) # Deactivated images shall not be served from cache if image_metadata['status'] == 'deactivated': return None try: self._enforce(request, 'download_image', target=image_metadata) except exception.Forbidden: return None LOG.debug("Cache hit for image '%s'", image_id) image_iterator = self.get_from_cache(image_id) method = getattr(self, '_process_%s_request' % version) try: return method(request, image_id, image_iterator, image_metadata) except exception.ImageNotFound: msg = _LE("Image cache contained image file for image '%s', " "however the registry did not contain metadata for " "that image!") % image_id LOG.error(msg) self.cache.delete_cached_image(image_id) @staticmethod def _stash_request_info(request, image_id, method, version): """ Preserve the image id, version and request method for later retrieval """ request.environ['api.cache.image_id'] = image_id request.environ['api.cache.method'] = method request.environ['api.cache.version'] = version @staticmethod def _fetch_request_info(request): """ Preserve the cached image id, version for consumption by the process_response method of this middleware """ try: image_id = request.environ['api.cache.image_id'] method = request.environ['api.cache.method'] version = request.environ['api.cache.version'] except KeyError: return None else: return (image_id, method, version) def _process_v1_request(self, request, image_id, image_iterator, image_meta): # Don't display location if 'location' in image_meta: del image_meta['location'] image_meta.pop('location_data', None) self._verify_metadata(image_meta) response = 
webob.Response(request=request) raw_response = { 'image_iterator': image_iterator, 'image_meta': image_meta, } return self.serializer.show(response, raw_response) def _process_v2_request(self, request, image_id, image_iterator, image_meta): # We do some contortions to get the image_metadata so # that we can provide it to 'size_checked_iter' which # will generate a notification. # TODO(mclaren): Make notification happen more # naturally once caching is part of the domain model. image = request.environ['api.cache.image'] self._verify_metadata(image_meta) response = webob.Response(request=request) response.app_iter = size_checked_iter(response, image_meta, image_meta['size'], image_iterator, notifier.Notifier()) # NOTE (flwang): Set the content-type, content-md5 and content-length # explicitly to be consistent with the non-cache scenario. # Besides, it's not worth the candle to invoke the "download" method # of ResponseSerializer under image_data. Because method "download" # will reset the app_iter. Then we have to call method # "size_checked_iter" to avoid missing any notification. But after # call "size_checked_iter", we will lose the content-md5 and # content-length got by the method "download" because of this issue: # https://github.com/Pylons/webob/issues/86 response.headers['Content-Type'] = 'application/octet-stream' response.headers['Content-MD5'] = (image.checksum.encode('utf-8') if six.PY2 else image.checksum) response.headers['Content-Length'] = str(image.size) return response def process_response(self, resp): """ We intercept the response coming back from the main images Resource, removing image file from the cache if necessary """ status_code = self.get_status_code(resp) if not 200 <= status_code < 300: return resp try: (image_id, method, version) = self._fetch_request_info( resp.request) except TypeError: return resp if method == 'GET' and status_code == 204: # Bugfix:1251055 - Don't cache non-existent image files. 
# NOTE: Both GET for an image without locations and DELETE return # 204 but DELETE should be processed. return resp method_str = '_process_%s_response' % method try: process_response_method = getattr(self, method_str) except AttributeError: LOG.error(_LE('could not find %s') % method_str) # Nothing to do here, move along return resp else: return process_response_method(resp, image_id, version=version) def _process_DELETE_response(self, resp, image_id, version=None): if self.cache.is_cached(image_id): LOG.debug("Removing image %s from cache", image_id) self.cache.delete_cached_image(image_id) return resp def _process_GET_response(self, resp, image_id, version=None): image_checksum = resp.headers.get('Content-MD5') if not image_checksum: # API V1 stores the checksum in a different header: image_checksum = resp.headers.get('x-image-meta-checksum') if not image_checksum: LOG.error(_LE("Checksum header is missing.")) # fetch image_meta on the basis of version image_metadata = None if version: method = getattr(self, '_get_%s_image_metadata' % version) image_metadata = method(resp.request, image_id) # NOTE(zhiyan): image_cache return a generator object and set to # response.app_iter, it will be called by eventlet.wsgi later. # So we need enforce policy firstly but do it by application # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and # return 403 error to client then. 
self._enforce(resp.request, 'download_image', target=image_metadata) resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum, resp.app_iter) return resp def get_status_code(self, response): """ Returns the integer status code from the response, which can be either a Webob.Response (used in testing) or httplib.Response """ if hasattr(response, 'status_int'): return response.status_int return response.status def get_from_cache(self, image_id): """Called if cache hit""" with self.cache.open_for_read(image_id) as cache_file: chunks = utils.chunkiter(cache_file) for chunk in chunks: yield chunk glance-12.0.0/glance/api/middleware/version_negotiation.py0000664000567000056710000001066212701407047024751 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" A filter middleware that inspects the requested URI for a version string and/or Accept headers and attempts to negotiate an API controller to return """ from oslo_config import cfg from oslo_log import log as logging from glance.api.glare import versions as artifacts_versions from glance.api import versions from glance.common import wsgi CONF = cfg.CONF LOG = logging.getLogger(__name__) class VersionNegotiationFilter(wsgi.Middleware): def __init__(self, app): self.versions_app = versions.Controller() self.allowed_versions = None self.vnd_mime_type = 'application/vnd.openstack.images-' super(VersionNegotiationFilter, self).__init__(app) def process_request(self, req): """Try to find a version first in the accept header, then the URL""" args = {'method': req.method, 'path': req.path, 'accept': req.accept} LOG.debug("Determining version of request: %(method)s %(path)s " "Accept: %(accept)s", args) # If the request is for /versions, just return the versions container if req.path_info_peek() == "versions": return self.versions_app.index(req, explicit=True) accept = str(req.accept) if accept.startswith(self.vnd_mime_type): LOG.debug("Using media-type versioning") token_loc = len(self.vnd_mime_type) req_version = accept[token_loc:] else: LOG.debug("Using url versioning") # Remove version in url so it doesn't conflict later req_version = self._pop_path_info(req) try: version = self._match_version_string(req_version) except ValueError: LOG.debug("Unknown version. 
Returning version choices.") return self.versions_app req.environ['api.version'] = version req.path_info = ''.join(('/v', str(version), req.path_info)) LOG.debug("Matched version: v%d", version) LOG.debug('new path %s', req.path_info) return None def _get_allowed_versions(self): allowed_versions = {} if CONF.enable_v1_api: allowed_versions['v1'] = 1 allowed_versions['v1.0'] = 1 allowed_versions['v1.1'] = 1 if CONF.enable_v2_api: allowed_versions['v2'] = 2 allowed_versions['v2.0'] = 2 allowed_versions['v2.1'] = 2 allowed_versions['v2.2'] = 2 return allowed_versions def _match_version_string(self, subject): """ Given a string, tries to match a major and/or minor version number. :param subject: The string to check :returns: version found in the subject :raises: ValueError if no acceptable version could be found """ if self.allowed_versions is None: self.allowed_versions = self._get_allowed_versions() if subject in self.allowed_versions: return self.allowed_versions[subject] else: raise ValueError() def _pop_path_info(self, req): """ 'Pops' off the next segment of PATH_INFO, returns the popped segment. Do NOT push it onto SCRIPT_NAME. """ path = req.path_info if not path: return None while path.startswith('/'): path = path[1:] idx = path.find('/') if idx == -1: idx = len(path) r = path[:idx] req.path_info = path[idx:] return r class GlareVersionNegotiationFilter(VersionNegotiationFilter): def __init__(self, app): super(GlareVersionNegotiationFilter, self).__init__(app) self.versions_app = artifacts_versions.Controller() self.vnd_mime_type = 'application/vnd.openstack.artifacts-' def _get_allowed_versions(self): return { 'v0.1': 0.1 } glance-12.0.0/glance/api/middleware/context.py0000664000567000056710000001344312701407047022350 0ustar jenkinsjenkins00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import webob.exc from glance.api import policy from glance.common import wsgi import glance.context from glance.i18n import _ context_opts = [ cfg.BoolOpt('owner_is_tenant', default=True, help=_('When true, this option sets the owner of an image ' 'to be the tenant. Otherwise, the owner of the ' ' image will be the authenticated user issuing the ' 'request.')), cfg.StrOpt('admin_role', default='admin', help=_('Role used to identify an authenticated user as ' 'administrator.')), cfg.BoolOpt('allow_anonymous_access', default=False, help=_('Allow unauthenticated users to access the API with ' 'read-only privileges. 
This only applies when using ' 'ContextMiddleware.')), cfg.IntOpt('max_request_id_length', default=64, help=_('Limits request ID length.')), ] CONF = cfg.CONF CONF.register_opts(context_opts) LOG = logging.getLogger(__name__) class BaseContextMiddleware(wsgi.Middleware): def process_response(self, resp): try: request_id = resp.request.context.request_id except AttributeError: LOG.warn(_('Unable to retrieve request id from context')) else: # For python 3 compatibility need to use bytes type prefix = b'req-' if isinstance(request_id, bytes) else 'req-' if not request_id.startswith(prefix): request_id = prefix + request_id resp.headers['x-openstack-request-id'] = request_id return resp class ContextMiddleware(BaseContextMiddleware): def __init__(self, app): self.policy_enforcer = policy.Enforcer() super(ContextMiddleware, self).__init__(app) def process_request(self, req): """Convert authentication information into a request context Generate a glance.context.RequestContext object from the available authentication headers and store on the 'context' attribute of the req object. 
:param req: wsgi request object that will be given the context object :raises: webob.exc.HTTPUnauthorized: when value of the X-Identity-Status header is not 'Confirmed' and anonymous access is disallowed """ if req.headers.get('X-Identity-Status') == 'Confirmed': req.context = self._get_authenticated_context(req) elif CONF.allow_anonymous_access: req.context = self._get_anonymous_context() else: raise webob.exc.HTTPUnauthorized() def _get_anonymous_context(self): kwargs = { 'user': None, 'tenant': None, 'roles': [], 'is_admin': False, 'read_only': True, 'policy_enforcer': self.policy_enforcer, } return glance.context.RequestContext(**kwargs) def _get_authenticated_context(self, req): # NOTE(bcwaldon): X-Roles is a csv string, but we need to parse # it into a list to be useful roles_header = req.headers.get('X-Roles', '') roles = [r.strip().lower() for r in roles_header.split(',')] # NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token deprecated_token = req.headers.get('X-Storage-Token') service_catalog = None if req.headers.get('X-Service-Catalog') is not None: try: catalog_header = req.headers.get('X-Service-Catalog') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( _('Invalid service catalog json.')) request_id = req.headers.get('X-Openstack-Request-ID') if request_id and (0 < CONF.max_request_id_length < len(request_id)): msg = (_('x-openstack-request-id is too long, max size %s') % CONF.max_request_id_length) return webob.exc.HTTPRequestHeaderFieldsTooLarge(comment=msg) kwargs = { 'user': req.headers.get('X-User-Id'), 'tenant': req.headers.get('X-Tenant-Id'), 'roles': roles, 'is_admin': CONF.admin_role.strip().lower() in roles, 'auth_token': req.headers.get('X-Auth-Token', deprecated_token), 'owner_is_tenant': CONF.owner_is_tenant, 'service_catalog': service_catalog, 'policy_enforcer': self.policy_enforcer, 'request_id': request_id, } return glance.context.RequestContext(**kwargs) class 
UnauthenticatedContextMiddleware(BaseContextMiddleware): def process_request(self, req): """Create a context without an authorized user.""" kwargs = { 'user': None, 'tenant': None, 'roles': [], 'is_admin': True, } req.context = glance.context.RequestContext(**kwargs) glance-12.0.0/glance/api/middleware/cache_manage.py0000664000567000056710000000572712701407047023245 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Image Cache Management API """ from oslo_log import log as logging import routes from glance.api import cached_images from glance.common import wsgi from glance.i18n import _LI LOG = logging.getLogger(__name__) class CacheManageFilter(wsgi.Middleware): def __init__(self, app): mapper = routes.Mapper() resource = cached_images.create_resource() mapper.connect("/v1/cached_images", controller=resource, action="get_cached_images", conditions=dict(method=["GET"])) mapper.connect("/v1/cached_images/{image_id}", controller=resource, action="delete_cached_image", conditions=dict(method=["DELETE"])) mapper.connect("/v1/cached_images", controller=resource, action="delete_cached_images", conditions=dict(method=["DELETE"])) mapper.connect("/v1/queued_images/{image_id}", controller=resource, action="queue_image", conditions=dict(method=["PUT"])) mapper.connect("/v1/queued_images", controller=resource, action="get_queued_images", conditions=dict(method=["GET"])) mapper.connect("/v1/queued_images/{image_id}", controller=resource, action="delete_queued_image", conditions=dict(method=["DELETE"])) mapper.connect("/v1/queued_images", controller=resource, action="delete_queued_images", conditions=dict(method=["DELETE"])) self._mapper = mapper self._resource = resource LOG.info(_LI("Initialized image cache management middleware")) super(CacheManageFilter, self).__init__(app) def process_request(self, request): # Map request to our resource object if we can handle it match = self._mapper.match(request.path_info, request.environ) if match: request.environ['wsgiorg.routing_args'] = (None, match) return self._resource(request) # Pass off downstream if we don't match the request path else: return None glance-12.0.0/glance/api/middleware/gzip.py0000664000567000056710000000433712701407047021637 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Use gzip compression if the client accepts it. """ import re from oslo_log import log as logging from glance.common import wsgi from glance.i18n import _LI LOG = logging.getLogger(__name__) class GzipMiddleware(wsgi.Middleware): re_zip = re.compile(r'\bgzip\b') def __init__(self, app): LOG.info(_LI("Initialized gzip middleware")) super(GzipMiddleware, self).__init__(app) def process_response(self, response): request = response.request accept_encoding = request.headers.get('Accept-Encoding', '') if self.re_zip.search(accept_encoding): # NOTE(flaper87): Webob removes the content-md5 when # app_iter is called. We'll keep it and reset it later checksum = response.headers.get("Content-MD5") # NOTE(flaper87): We'll use lazy for images so # that they can be compressed without reading # the whole content in memory. Notice that using # lazy will set response's content-length to 0. content_type = response.headers["Content-Type"] lazy = content_type == "application/octet-stream" # NOTE(flaper87): Webob takes care of the compression # process, it will replace the body either with a # compressed body or a generator - used for lazy com # pression - depending on the lazy value. # # Webob itself will set the Content-Encoding header. 
response.encode_content(lazy=lazy) if checksum: response.headers['Content-MD5'] = checksum return response glance-12.0.0/glance/api/v2/0000775000567000056710000000000012701407204016512 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/v2/image_tags.py0000664000567000056710000001037412701407047021176 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from oslo_log import log as logging from oslo_utils import encodeutils import webob.exc from glance.api import policy from glance.api.v2 import images as v2_api from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier LOG = logging.getLogger(__name__) class Controller(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @utils.mutating def update(self, req, image_id, tag_value): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) image.tags.add(tag_value) image_repo.save(image) except exception.NotFound: msg = _("Image %s not 
found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = _("Not allowed to update tags for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) except exception.Invalid as e: msg = (_("Could not update image: %s") % encodeutils.exception_to_unicode(e)) LOG.warning(msg) raise webob.exc.HTTPBadRequest(explanation=msg) except exception.ImageTagLimitExceeded as e: msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:") % {"id": image_id, "e": encodeutils.exception_to_unicode(e)}) LOG.warning(msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) @utils.mutating def delete(self, req, image_id, tag_value): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) if tag_value not in image.tags: raise webob.exc.HTTPNotFound() image.tags.remove(tag_value) image_repo.save(image) except exception.NotFound: msg = _("Image %s not found.") % image_id LOG.warning(msg) raise webob.exc.HTTPNotFound(explanation=msg) except exception.Forbidden: msg = _("Not allowed to delete tags for image %s.") % image_id LOG.warning(msg) raise webob.exc.HTTPForbidden(explanation=msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def update(self, response, result): response.status_int = 204 def delete(self, response, result): response.status_int = 204 class RequestDeserializer(wsgi.JSONRequestDeserializer): def update(self, request): try: schema = v2_api.get_schema() schema_format = {"tags": [request.urlvars.get('tag_value')]} schema.validate(schema_format) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) return super(RequestDeserializer, self).default(request) def create_resource(): """Images resource factory method""" serializer = ResponseSerializer() deserializer = RequestDeserializer() controller = Controller() return wsgi.Resource(controller, deserializer, serializer) 
glance-12.0.0/glance/api/v2/__init__.py0000664000567000056710000000000012701407047020616 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/v2/metadef_objects.py0000664000567000056710000003260512701407047022215 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import six import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2 import metadef_namespaces as namespaces from glance.api.v2.model.metadef_object import MetadefObject from glance.api.v2.model.metadef_object import MetadefObjects from glance.common import exception from glance.common import wsgi from glance.common import wsme_utils import glance.db from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF class MetadefObjectsController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) self.obj_schema_link = '/v2/schemas/metadefs/object' def create(self, req, metadata_object, namespace): object_factory 
= self.gateway.get_metadef_object_factory(req.context) object_repo = self.gateway.get_metadef_object_repo(req.context) try: new_meta_object = object_factory.new_object( namespace=namespace, **metadata_object.to_dict()) object_repo.add(new_meta_object) except exception.Forbidden as e: LOG.debug("User not permitted to create metadata object within " "'%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return MetadefObject.to_wsme_model( new_meta_object, get_object_href(namespace, new_meta_object), self.obj_schema_link) def index(self, req, namespace, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): try: filters = filters or dict() filters['namespace'] = namespace object_repo = self.gateway.get_metadef_object_repo(req.context) db_metaobject_list = object_repo.list( marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters) object_list = [MetadefObject.to_wsme_model( db_metaobject, get_object_href(namespace, db_metaobject), self.obj_schema_link) for db_metaobject in db_metaobject_list] metadef_objects = MetadefObjects() metadef_objects.objects = object_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata objects within " "'%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return metadef_objects def show(self, req, namespace, object_name): meta_object_repo = self.gateway.get_metadef_object_repo( req.context) try: metadef_object = meta_object_repo.get(namespace, object_name) 
return MetadefObject.to_wsme_model( metadef_object, get_object_href(namespace, metadef_object), self.obj_schema_link) except exception.Forbidden as e: LOG.debug("User not permitted to show metadata object '%s' " "within '%s' namespace", namespace, object_name) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def update(self, req, metadata_object, namespace, object_name): meta_repo = self.gateway.get_metadef_object_repo(req.context) try: metadef_object = meta_repo.get(namespace, object_name) metadef_object._old_name = metadef_object.name metadef_object.name = wsme_utils._get_value( metadata_object.name) metadef_object.description = wsme_utils._get_value( metadata_object.description) metadef_object.required = wsme_utils._get_value( metadata_object.required) metadef_object.properties = wsme_utils._get_value( metadata_object.properties) updated_metadata_obj = meta_repo.save(metadef_object) except exception.Forbidden as e: LOG.debug("User not permitted to update metadata object '%s' " "within '%s' namespace ", object_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return MetadefObject.to_wsme_model( updated_metadata_obj, get_object_href(namespace, updated_metadata_obj), self.obj_schema_link) def delete(self, req, namespace, object_name): meta_repo = self.gateway.get_metadef_object_repo(req.context) try: metadef_object = meta_repo.get(namespace, object_name) metadef_object.delete() meta_repo.remove(metadef_object) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata 
object '%s' " "within '%s' namespace", object_name, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def _get_base_definitions(): return namespaces.get_schema_definitions() def _get_base_properties(): return { "name": { "type": "string" }, "description": { "type": "string" }, "required": { "$ref": "#/definitions/stringArray" }, "properties": { "$ref": "#/definitions/property" }, "schema": { 'readOnly': True, "type": "string" }, "self": { 'readOnly': True, "type": "string" }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of object creation"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last object modification"), "format": "date-time" } } def get_schema(): definitions = _get_base_definitions() properties = _get_base_properties() mandatory_attrs = MetadefObject.get_mandatory_attrs() schema = glance.schema.Schema( 'object', properties, required=mandatory_attrs, definitions=definitions, ) return schema def get_collection_schema(): object_schema = get_schema() return glance.schema.CollectionSchema('objects', object_schema) class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise 
webob.exc.HTTPBadRequest(explanation=e.msg) metadata_object = json.fromjson(MetadefObject, body) return dict(metadata_object=metadata_object) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) metadata_object = json.fromjson(MetadefObject, body) return dict(metadata_object=metadata_object) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['public', 'private', 'shared']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit <= 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or 
get_schema() def create(self, response, metadata_object): response.status_int = 201 self.show(response, metadata_object) def show(self, response, metadata_object): metadata_object_json = json.tojson(MetadefObject, metadata_object) body = jsonutils.dumps(metadata_object_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def update(self, response, metadata_object): response.status_int = 200 self.show(response, metadata_object) def index(self, response, result): result.schema = "v2/schemas/metadefs/objects" metadata_objects_json = json.tojson(MetadefObjects, result) body = jsonutils.dumps(metadata_objects_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def get_object_href(namespace_name, metadef_object): base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadef_object.name)) return base_href def create_resource(): """Metadef objects resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = MetadefObjectsController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/router.py0000664000567000056710000006140412701407047020416 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from glance.api.v2 import image_actions from glance.api.v2 import image_data from glance.api.v2 import image_members from glance.api.v2 import image_tags from glance.api.v2 import images from glance.api.v2 import metadef_namespaces from glance.api.v2 import metadef_objects from glance.api.v2 import metadef_properties from glance.api.v2 import metadef_resource_types from glance.api.v2 import metadef_tags from glance.api.v2 import schemas from glance.api.v2 import tasks from glance.common import wsgi class API(wsgi.Router): """WSGI router for Glance v2 API requests.""" def __init__(self, mapper): custom_image_properties = images.load_custom_properties() reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) schemas_resource = schemas.create_resource(custom_image_properties) mapper.connect('/schemas/image', controller=schemas_resource, action='image', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/image', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/images', controller=schemas_resource, action='images', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/images', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/member', controller=schemas_resource, action='member', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/member', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/members', controller=schemas_resource, action='members', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/members', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/task', controller=schemas_resource, action='task', conditions={'method': ['GET']}) mapper.connect('/schemas/task', 
controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/tasks', controller=schemas_resource, action='tasks', conditions={'method': ['GET']}) mapper.connect('/schemas/tasks', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/namespace', controller=schemas_resource, action='metadef_namespace', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/namespace', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/namespaces', controller=schemas_resource, action='metadef_namespaces', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/namespaces', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/resource_type', controller=schemas_resource, action='metadef_resource_type', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/resource_type', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/resource_types', controller=schemas_resource, action='metadef_resource_types', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/resource_types', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/property', controller=schemas_resource, action='metadef_property', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/property', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/properties', controller=schemas_resource, action='metadef_properties', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/properties', controller=reject_method_resource, action='reject', allowed_methods='GET') 
mapper.connect('/schemas/metadefs/object', controller=schemas_resource, action='metadef_object', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/object', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/objects', controller=schemas_resource, action='metadef_objects', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/objects', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/tag', controller=schemas_resource, action='metadef_tag', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/tag', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/schemas/metadefs/tags', controller=schemas_resource, action='metadef_tags', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/schemas/metadefs/tags', controller=reject_method_resource, action='reject', allowed_methods='GET') # Metadef resource types metadef_resource_types_resource = ( metadef_resource_types.create_resource()) mapper.connect('/metadefs/resource_types', controller=metadef_resource_types_resource, action='index', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/resource_types', controller=reject_method_resource, action='reject', allowed_methods='GET') mapper.connect('/metadefs/namespaces/{namespace}/resource_types', controller=metadef_resource_types_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/resource_types', controller=metadef_resource_types_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/metadefs/namespaces/{namespace}/resource_types', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' '{resource_type}', 
controller=metadef_resource_types_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' '{resource_type}', controller=reject_method_resource, action='reject', allowed_methods='DELETE') # Metadef Namespaces metadef_namespace_resource = metadef_namespaces.create_resource() mapper.connect('/metadefs/namespaces', controller=metadef_namespace_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/metadefs/namespaces', controller=metadef_namespace_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/metadefs/namespaces', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/metadefs/namespaces/{namespace}', controller=metadef_namespace_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}', controller=metadef_namespace_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/metadefs/namespaces/{namespace}', controller=metadef_namespace_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') # Metadef namespace properties metadef_properties_resource = metadef_properties.create_resource() mapper.connect('/metadefs/namespaces/{namespace}/properties', controller=metadef_properties_resource, action='index', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/properties', controller=metadef_properties_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/metadefs/namespaces/{namespace}/properties', controller=metadef_namespace_resource, action='delete_properties', conditions={'method': ['DELETE']}) mapper.connect('/metadefs/namespaces/{namespace}/properties', 
controller=reject_method_resource, action='reject', allowed_methods='GET, POST, DELETE') mapper.connect('/metadefs/namespaces/{namespace}/properties/{' 'property_name}', controller=metadef_properties_resource, action='show', conditions={'method': ['GET']}) mapper.connect('/metadefs/namespaces/{namespace}/properties/{' 'property_name}', controller=metadef_properties_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/metadefs/namespaces/{namespace}/properties/{' 'property_name}', controller=metadef_properties_resource, action='delete', conditions={'method': ['DELETE']}) mapper.connect('/metadefs/namespaces/{namespace}/properties/{' 'property_name}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') # Metadef objects metadef_objects_resource = metadef_objects.create_resource() mapper.connect('/metadefs/namespaces/{namespace}/objects', controller=metadef_objects_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/metadefs/namespaces/{namespace}/objects', controller=metadef_objects_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/metadefs/namespaces/{namespace}/objects', controller=metadef_namespace_resource, action='delete_objects', conditions={'method': ['DELETE']}) mapper.connect('/metadefs/namespaces/{namespace}/objects', controller=reject_method_resource, action='reject', allowed_methods='GET, POST, DELETE') mapper.connect('/metadefs/namespaces/{namespace}/objects/{' 'object_name}', controller=metadef_objects_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/objects/{' 'object_name}', controller=metadef_objects_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/metadefs/namespaces/{namespace}/objects/{' 'object_name}', controller=metadef_objects_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) 
mapper.connect('/metadefs/namespaces/{namespace}/objects/{' 'object_name}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') # Metadef tags metadef_tags_resource = metadef_tags.create_resource() mapper.connect('/metadefs/namespaces/{namespace}/tags', controller=metadef_tags_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/metadefs/namespaces/{namespace}/tags', controller=metadef_tags_resource, action='create_tags', conditions={'method': ['POST']}) mapper.connect('/metadefs/namespaces/{namespace}/tags', controller=metadef_namespace_resource, action='delete_tags', conditions={'method': ['DELETE']}) mapper.connect('/metadefs/namespaces/{namespace}/tags', controller=reject_method_resource, action='reject', allowed_methods='GET, POST, DELETE') mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='create', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=metadef_tags_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', controller=reject_method_resource, action='reject', allowed_methods='GET, POST, PUT, DELETE') images_resource = images.create_resource(custom_image_properties) mapper.connect('/images', controller=images_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/images', controller=images_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/images', controller=reject_method_resource, action='reject', 
allowed_methods='GET, POST') mapper.connect('/images/{image_id}', controller=images_resource, action='update', conditions={'method': ['PATCH']}) mapper.connect('/images/{image_id}', controller=images_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}', controller=images_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/images/{image_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, PATCH, DELETE') image_actions_resource = image_actions.create_resource() mapper.connect('/images/{image_id}/actions/deactivate', controller=image_actions_resource, action='deactivate', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/images/{image_id}/actions/reactivate', controller=image_actions_resource, action='reactivate', conditions={'method': ['POST']}, body_reject=True) mapper.connect('/images/{image_id}/actions/deactivate', controller=reject_method_resource, action='reject', allowed_methods='POST') mapper.connect('/images/{image_id}/actions/reactivate', controller=reject_method_resource, action='reject', allowed_methods='POST') image_data_resource = image_data.create_resource() mapper.connect('/images/{image_id}/file', controller=image_data_resource, action='download', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/file', controller=image_data_resource, action='upload', conditions={'method': ['PUT']}) mapper.connect('/images/{image_id}/file', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT') image_tags_resource = image_tags.create_resource() mapper.connect('/images/{image_id}/tags/{tag_value}', controller=image_tags_resource, action='update', conditions={'method': ['PUT']}, body_reject=True) mapper.connect('/images/{image_id}/tags/{tag_value}', controller=image_tags_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) 
mapper.connect('/images/{image_id}/tags/{tag_value}', controller=reject_method_resource, action='reject', allowed_methods='PUT, DELETE') image_members_resource = image_members.create_resource() mapper.connect('/images/{image_id}/members', controller=image_members_resource, action='index', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/members', controller=image_members_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/images/{image_id}/members', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='show', conditions={'method': ['GET']}, body_reject=True) mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='update', conditions={'method': ['PUT']}) mapper.connect('/images/{image_id}/members/{member_id}', controller=image_members_resource, action='delete', conditions={'method': ['DELETE']}, body_reject=True) mapper.connect('/images/{image_id}/members/{member_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') tasks_resource = tasks.create_resource() mapper.connect('/tasks', controller=tasks_resource, action='create', conditions={'method': ['POST']}) mapper.connect('/tasks', controller=tasks_resource, action='index', conditions={'method': ['GET']}) mapper.connect('/tasks', controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect('/tasks/{task_id}', controller=tasks_resource, action='get', conditions={'method': ['GET']}) mapper.connect('/tasks/{task_id}', controller=tasks_resource, action='delete', conditions={'method': ['DELETE']}) mapper.connect('/tasks/{task_id}', controller=reject_method_resource, action='reject', allowed_methods='GET, DELETE') super(API, self).__init__(mapper) 
glance-12.0.0/glance/api/v2/metadef_resource_types.py0000664000567000056710000002606312701407047023640 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import six import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_resource_type import ResourceType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations from glance.api.v2.model.metadef_resource_type import ResourceTypes from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class ResourceTypeController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) def index(self, req): try: filters = {'namespace': None} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = 
rs_type_repo.list(filters=filters) resource_type_list = [ResourceType.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypes() resource_types.resource_types = resource_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError(e) return resource_types def show(self, req, namespace): try: filters = {'namespace': namespace} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypeAssociations() resource_types.resource_type_associations = resource_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError(e) return resource_types def create(self, req, resource_type, namespace): rs_type_factory = self.gateway.get_metadef_resource_type_factory( req.context) rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: new_resource_type = rs_type_factory.new_resource_type( namespace=namespace, **resource_type.to_dict()) rs_type_repo.add(new_resource_type) except exception.Forbidden as e: LOG.debug("User not permitted to create metadata resource type " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise 
webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return ResourceTypeAssociation.to_wsme_model(new_resource_type) def delete(self, req, namespace, resource_type): rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: filters = {} found = False filters['namespace'] = namespace db_resource_type_list = rs_type_repo.list(filters=filters) for db_resource_type in db_resource_type_list: if db_resource_type.name == resource_type: db_resource_type.delete() rs_type_repo.remove(db_resource_type) found = True if not found: raise exception.NotFound() except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata resource type " "'%s' within '%s' namespace", resource_type, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: msg = (_("Failed to find resource type %(resourcetype)s to " "delete") % {'resourcetype': resource_type}) LOG.error(msg) raise webob.exc.HTTPNotFound(explanation=msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def create(self, request): body = self._get_request_body(request) 
self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) resource_type = json.fromjson(ResourceTypeAssociation, body) return dict(resource_type=resource_type) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def show(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociations, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def index(self, response, result): resource_type_json = json.tojson(ResourceTypes, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociation, result) response.status_int = 201 body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def _get_base_properties(): return { 'name': { 'type': 'string', 'description': _('Resource type names should be aligned with Heat ' 'resource types whenever possible: ' 'http://docs.openstack.org/developer/heat/' 'template_guide/openstack.html'), 'maxLength': 80, }, 'prefix': { 'type': 'string', 'description': _('Specifies the prefix to use for the given ' 'resource type. Any properties in the namespace ' 'should be prefixed with this prefix when being ' 'applied to the specified resource type. Must ' 'include prefix separator (e.g. a colon :).'), 'maxLength': 80, }, 'properties_target': { 'type': 'string', 'description': _('Some resource types allow more than one key / ' 'value pair per instance. 
For example, Cinder ' 'allows user and image metadata on volumes. Only ' 'the image properties metadata is evaluated by ' 'Nova (scheduling or drivers). This property ' 'allows a namespace target to remove the ' 'ambiguity.'), 'maxLength': 80, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of resource type association"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last resource type " "association modification"), "format": "date-time" } } def get_schema(): properties = _get_base_properties() mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() schema = glance.schema.Schema( 'resource_type_association', properties, required=mandatory_attrs, ) return schema def get_collection_schema(): resource_type_schema = get_schema() return glance.schema.CollectionSchema('resource_type_associations', resource_type_schema) def create_resource(): """ResourceTypeAssociation resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ResourceTypeController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/metadef_tags.py0000664000567000056710000003433112701407047021520 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
import webob.exc
from wsme.rest import json

from glance.api import policy
from glance.api.v2.model.metadef_tag import MetadefTag
from glance.api.v2.model.metadef_tag import MetadefTags
from glance.common import exception
from glance.common import wsgi
from glance.common import wsme_utils
import glance.db
# FIX: glance.gateway was referenced (TagsController.__init__) but never
# imported here; the module only worked because a sibling module imported it
# first.  Import it explicitly, consistent with metadef_namespaces.py.
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class TagsController(object):
    """WSGI controller implementing CRUD for metadata-definition tags.

    Domain errors from the gateway repos/factories are translated to the
    matching webob HTTP exceptions; anything unexpected becomes a 500.
    """

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.gateway = glance.gateway.Gateway(db_api=self.db_api,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)
        self.tag_schema_link = '/v2/schemas/metadefs/tag'

    def create(self, req, namespace, tag_name):
        """Create a single tag named ``tag_name`` under ``namespace``."""
        tag_factory = self.gateway.get_metadef_tag_factory(req.context)
        tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        tag_name_as_dict = {'name': tag_name}
        try:
            new_meta_tag = tag_factory.new_tag(
                namespace=namespace,
                **tag_name_as_dict)
            tag_repo.add(new_meta_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata tag within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return MetadefTag.to_wsme_model(new_meta_tag)

    def create_tags(self, req, metadata_tags, namespace):
        """Create several tags under ``namespace`` in one request."""
        tag_factory = self.gateway.get_metadef_tag_factory(req.context)
        tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        try:
            tag_list = []
            for metadata_tag in metadata_tags.tags:
                tag_list.append(tag_factory.new_tag(
                    namespace=namespace, **metadata_tag.to_dict()))
            tag_repo.add_tags(tag_list)
            tag_list_out = [MetadefTag(**{'name': db_metatag.name})
                            for db_metatag in tag_list]
            metadef_tags = MetadefTags()
            metadef_tags.tags = tag_list_out
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata tags within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return metadef_tags

    def index(self, req, namespace, marker=None, limit=None,
              sort_key='created_at', sort_dir='desc', filters=None):
        """List tags in ``namespace``; marker is a tag name resolved to id."""
        try:
            filters = filters or dict()
            filters['namespace'] = namespace
            tag_repo = self.gateway.get_metadef_tag_repo(req.context)
            if marker:
                metadef_tag = tag_repo.get(namespace, marker)
                marker = metadef_tag.tag_id
            db_metatag_list = tag_repo.list(
                marker=marker, limit=limit, sort_key=sort_key,
                sort_dir=sort_dir, filters=filters)
            tag_list = [MetadefTag(**{'name': db_metatag.name})
                        for db_metatag in db_metatag_list]
            metadef_tags = MetadefTags()
            metadef_tags.tags = tag_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata tags "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return metadef_tags

    def show(self, req, namespace, tag_name):
        """Return a single tag, or raise the mapped HTTP error."""
        meta_tag_repo = self.gateway.get_metadef_tag_repo(req.context)
        try:
            metadef_tag = meta_tag_repo.get(namespace, tag_name)
            return MetadefTag.to_wsme_model(metadef_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to show metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()

    def update(self, req, metadata_tag, namespace, tag_name):
        """Rename an existing tag; ``_old_name`` records the prior name."""
        meta_repo = self.gateway.get_metadef_tag_repo(req.context)
        try:
            metadef_tag = meta_repo.get(namespace, tag_name)
            metadef_tag._old_name = metadef_tag.name
            metadef_tag.name = wsme_utils._get_value(
                metadata_tag.name)
            updated_metadata_tag = meta_repo.save(metadef_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to update metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return MetadefTag.to_wsme_model(updated_metadata_tag)

    def delete(self, req, namespace, tag_name):
        """Delete a tag (domain delete first, then repo removal)."""
        meta_repo = self.gateway.get_metadef_tag_repo(req.context)
        try:
            metadef_tag = meta_repo.get(namespace, tag_name)
            metadef_tag.delete()
            meta_repo.remove(metadef_tag)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata tag '%s' "
                      "within '%s' namespace", tag_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()


def _get_base_definitions():
    # Tag schema has no shared definitions.
    return None


def _get_base_properties():
    # JSON-schema properties for a single tag.
    return {
        "name": {
            "type": "string"
        },
        "created_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of tag creation"),
            "format": "date-time"
        },
        "updated_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of the last tag modification"),
            "format": "date-time"
        }
    }


def _get_base_properties_for_list():
    # JSON-schema properties for the bulk-create ("tags") payload.
    return {
        "tags": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                },
                "required": ["name"],
                "additionalProperties": False
            }
        },
    }


def get_schema():
    """Build the schema used to validate a single tag body."""
    definitions = _get_base_definitions()
    properties = _get_base_properties()
    mandatory_attrs = MetadefTag.get_mandatory_attrs()
    schema = glance.schema.Schema(
        'tag',
        properties,
        required=mandatory_attrs,
        definitions=definitions,
    )
    return schema


def get_schema_for_list():
    """Build the schema used to validate a bulk tag-create body."""
    definitions = _get_base_definitions()
    properties = _get_base_properties_for_list()
    schema = glance.schema.Schema(
        'tags',
        properties,
        required=None,
        definitions=definitions,
    )
    return schema


def get_collection_schema():
    tag_schema = get_schema()
    return glance.schema.CollectionSchema('tags', tag_schema)


class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Validate and deserialize incoming tag requests."""

    _disallowed_properties = ['created_at', 'updated_at']

    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()
        self.schema_for_list = get_schema_for_list()

    def _get_request_body(self, request):
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']

    def _validate_sort_dir(self, sort_dir):
        if sort_dir not in ['asc', 'desc']:
            msg = _('Invalid sort direction: %s') % sort_dir
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return sort_dir

    def _get_filters(self, filters):
        # Unlike namespaces, tags also accept the 'shared' visibility value.
        visibility = filters.get('visibility')
        if visibility:
            if visibility not in ['public', 'private', 'shared']:
                msg = _('Invalid visibility value: %s') % visibility
                raise webob.exc.HTTPBadRequest(explanation=msg)
        return filters

    def _validate_limit(self, limit):
        # NOTE(review): 0 passes this check although the message says
        # "positive" -- preserved as-is; callers treat 0 as "no results".
        try:
            limit = int(limit)
        except ValueError:
            msg = _("limit param must be an integer")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if limit < 0:
            msg = _("limit param must be positive")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return limit

    def update(self, request):
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        metadata_tag = json.fromjson(MetadefTag, body)
        return dict(metadata_tag=metadata_tag)

    def index(self, request):
        params = request.params.copy()
        limit = params.pop('limit', None)
        marker = params.pop('marker', None)
        sort_dir = params.pop('sort_dir', 'desc')
        query_params = {
            'sort_key': params.pop('sort_key', 'created_at'),
            'sort_dir': self._validate_sort_dir(sort_dir),
            'filters': self._get_filters(params)
        }
        if marker:
            query_params['marker'] = marker
        if limit:
            query_params['limit'] = self._validate_limit(limit)
        return query_params

    def create_tags(self, request):
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema_for_list.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        metadata_tags = json.fromjson(MetadefTags, body)
        return dict(metadata_tags=metadata_tags)

    @classmethod
    def _check_allowed(cls, image):
        # 'image' is the decoded request body dict (name kept from the
        # pattern shared with the image deserializers).
        for key in cls._disallowed_properties:
            if key in image:
                msg = _("Attribute '%s' is read-only.") % key
                raise webob.exc.HTTPForbidden(explanation=msg)


class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize tag controller results to JSON responses."""

    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def create(self, response, metadata_tag):
        response.status_int = 201
        self.show(response, metadata_tag)

    def create_tags(self, response, result):
        response.status_int = 201
        metadata_tags_json = json.tojson(MetadefTags, result)
        body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def show(self, response, metadata_tag):
        metadata_tag_json = json.tojson(MetadefTag, metadata_tag)
        body = jsonutils.dumps(metadata_tag_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def update(self, response, metadata_tag):
        response.status_int = 200
        self.show(response, metadata_tag)

    def index(self, response, result):
        metadata_tags_json = json.tojson(MetadefTags, result)
        body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def delete(self, response, result):
        response.status_int = 204


def get_tag_href(namespace_name, metadef_tag):
    base_href = ('/v2/metadefs/namespaces/%s/tags/%s' %
                 (namespace_name, metadef_tag.name))
    return base_href


def create_resource():
    """Metadef tags resource factory method"""
    schema = get_schema()
    deserializer = RequestDeserializer(schema)
    serializer = ResponseSerializer(schema)
    controller = TagsController()
    return wsgi.Resource(controller, deserializer, serializer)

# --- tar archive member boundary (residue preserved from extraction) ---
# glance-12.0.0/glance/api/v2/metadef_namespaces.py0000664000567000056710000007574612701407047022710 0ustar  jenkinsjenkins00000000000000
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import six import six.moves.urllib.parse as urlparse import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_namespace import Namespace from glance.api.v2.model.metadef_namespace import Namespaces from glance.api.v2.model.metadef_object import MetadefObject from glance.api.v2.model.metadef_property_type import PropertyType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_tag import MetadefTag from glance.common import exception from glance.common import utils from glance.common import wsgi from glance.common import wsme_utils import glance.db import glance.gateway from glance.i18n import _, _LE import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF class NamespaceController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) self.ns_schema_link = '/v2/schemas/metadefs/namespace' self.obj_schema_link = '/v2/schemas/metadefs/object' self.tag_schema_link = '/v2/schemas/metadefs/tag' def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): try: ns_repo = self.gateway.get_metadef_namespace_repo(req.context) # Get namespace id if marker: namespace_obj = ns_repo.get(marker) marker = namespace_obj.namespace_id database_ns_list = ns_repo.list( marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters) for db_namespace in database_ns_list: # Get resource type associations filters = dict() filters['namespace'] = db_namespace.namespace 
rs_repo = ( self.gateway.get_metadef_resource_type_repo(req.context)) repo_rs_type_list = rs_repo.list(filters=filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in repo_rs_type_list] if resource_type_list: db_namespace.resource_type_associations = ( resource_type_list) namespace_list = [Namespace.to_wsme_model( db_namespace, get_namespace_href(db_namespace), self.ns_schema_link) for db_namespace in database_ns_list] namespaces = Namespaces() namespaces.namespaces = namespace_list if len(namespace_list) != 0 and len(namespace_list) == limit: namespaces.next = namespace_list[-1].namespace except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata namespaces " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return namespaces @utils.mutating def create(self, req, namespace): try: namespace_created = False # Create Namespace ns_factory = self.gateway.get_metadef_namespace_factory( req.context) ns_repo = self.gateway.get_metadef_namespace_repo(req.context) new_namespace = ns_factory.new_namespace(**namespace.to_dict()) ns_repo.add(new_namespace) namespace_created = True # Create Resource Types if namespace.resource_type_associations: rs_factory = (self.gateway.get_metadef_resource_type_factory( req.context)) rs_repo = self.gateway.get_metadef_resource_type_repo( req.context) for resource_type in namespace.resource_type_associations: new_resource = rs_factory.new_resource_type( namespace=namespace.namespace, **resource_type.to_dict()) rs_repo.add(new_resource) # Create Objects if namespace.objects: object_factory = self.gateway.get_metadef_object_factory( req.context) object_repo = self.gateway.get_metadef_object_repo( req.context) for metadata_object in namespace.objects: new_meta_object = 
object_factory.new_object( namespace=namespace.namespace, **metadata_object.to_dict()) object_repo.add(new_meta_object) # Create Tags if namespace.tags: tag_factory = self.gateway.get_metadef_tag_factory( req.context) tag_repo = self.gateway.get_metadef_tag_repo(req.context) for metadata_tag in namespace.tags: new_meta_tag = tag_factory.new_tag( namespace=namespace.namespace, **metadata_tag.to_dict()) tag_repo.add(new_meta_tag) # Create Namespace Properties if namespace.properties: prop_factory = (self.gateway.get_metadef_property_factory( req.context)) prop_repo = self.gateway.get_metadef_property_repo( req.context) for (name, value) in namespace.properties.items(): new_property_type = ( prop_factory.new_namespace_property( namespace=namespace.namespace, **self._to_property_dict(name, value) )) prop_repo.add(new_property_type) except exception.Forbidden as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) LOG.debug("User not permitted to create metadata namespace") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: self._cleanup_namespace(ns_repo, namespace, namespace_created) raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() # Return the user namespace as we don't expose the id to user new_namespace.properties = namespace.properties new_namespace.objects = namespace.objects new_namespace.resource_type_associations = ( namespace.resource_type_associations) new_namespace.tags = namespace.tags return Namespace.to_wsme_model(new_namespace, get_namespace_href(new_namespace), self.ns_schema_link) def _to_property_dict(self, name, value): # Convert the model PropertyTypes dict to a JSON string db_property_type_dict = dict() db_property_type_dict['schema'] = 
json.tojson(PropertyType, value) db_property_type_dict['name'] = name return db_property_type_dict def _cleanup_namespace(self, namespace_repo, namespace, namespace_created): if namespace_created: try: namespace_obj = namespace_repo.get(namespace.namespace) namespace_obj.delete() namespace_repo.remove(namespace_obj) LOG.debug("Cleaned up namespace %(namespace)s ", {'namespace': namespace.namespace}) except exception: msg = (_LE("Failed to delete namespace %(namespace)s ") % {'namespace': namespace.namespace}) LOG.error(msg) def show(self, req, namespace, filters=None): try: # Get namespace ns_repo = self.gateway.get_metadef_namespace_repo(req.context) namespace_obj = ns_repo.get(namespace) namespace_detail = Namespace.to_wsme_model( namespace_obj, get_namespace_href(namespace_obj), self.ns_schema_link) ns_filters = dict() ns_filters['namespace'] = namespace # Get objects object_repo = self.gateway.get_metadef_object_repo(req.context) db_metaobject_list = object_repo.list(filters=ns_filters) object_list = [MetadefObject.to_wsme_model( db_metaobject, get_object_href(namespace, db_metaobject), self.obj_schema_link) for db_metaobject in db_metaobject_list] if object_list: namespace_detail.objects = object_list # Get resource type associations rs_repo = self.gateway.get_metadef_resource_type_repo(req.context) db_resource_type_list = rs_repo.list(filters=ns_filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in db_resource_type_list] if resource_type_list: namespace_detail.resource_type_associations = ( resource_type_list) # Get properties prop_repo = self.gateway.get_metadef_property_repo(req.context) db_properties = prop_repo.list(filters=ns_filters) property_list = Namespace.to_model_properties(db_properties) if property_list: namespace_detail.properties = property_list if filters and filters['resource_type']: namespace_detail = self._prefix_property_name( namespace_detail, filters['resource_type']) # Get tags 
tag_repo = self.gateway.get_metadef_tag_repo(req.context) db_metatag_list = tag_repo.list(filters=ns_filters) tag_list = [MetadefTag(**{'name': db_metatag.name}) for db_metatag in db_metatag_list] if tag_list: namespace_detail.tags = tag_list except exception.Forbidden as e: LOG.debug("User not permitted to show metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return namespace_detail def update(self, req, user_ns, namespace): namespace_repo = self.gateway.get_metadef_namespace_repo(req.context) try: ns_obj = namespace_repo.get(namespace) ns_obj._old_namespace = ns_obj.namespace ns_obj.namespace = wsme_utils._get_value(user_ns.namespace) ns_obj.display_name = wsme_utils._get_value(user_ns.display_name) ns_obj.description = wsme_utils._get_value(user_ns.description) # Following optional fields will default to same values as in # create namespace if not specified ns_obj.visibility = ( wsme_utils._get_value(user_ns.visibility) or 'private') ns_obj.protected = ( wsme_utils._get_value(user_ns.protected) or False) ns_obj.owner = ( wsme_utils._get_value(user_ns.owner) or req.context.owner) updated_namespace = namespace_repo.save(ns_obj) except exception.Forbidden as e: LOG.debug("User not permitted to update metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return Namespace.to_wsme_model(updated_namespace, get_namespace_href(updated_namespace), self.ns_schema_link) def delete(self, req, namespace): namespace_repo = 
self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = namespace_repo.get(namespace) namespace_obj.delete() namespace_repo.remove(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata namespace " "'%s'", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def delete_objects(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) namespace_obj.delete() ns_repo.remove_objects(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata objects " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def delete_tags(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) namespace_obj.delete() ns_repo.remove_tags(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata tags " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def delete_properties(self, req, namespace): ns_repo = self.gateway.get_metadef_namespace_repo(req.context) try: namespace_obj = ns_repo.get(namespace) namespace_obj.delete() ns_repo.remove_properties(namespace_obj) except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata properties " "within '%s' 
namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() def _prefix_property_name(self, namespace_detail, user_resource_type): prefix = None if user_resource_type and namespace_detail.resource_type_associations: for resource_type in namespace_detail.resource_type_associations: if resource_type.name == user_resource_type: prefix = resource_type.prefix break if prefix: if namespace_detail.properties: new_property_dict = dict() for (key, value) in namespace_detail.properties.items(): new_property_dict[prefix + key] = value namespace_detail.properties = new_property_dict if namespace_detail.objects: for object in namespace_detail.objects: new_object_property_dict = dict() for (key, value) in object.properties.items(): new_object_property_dict[prefix + key] = value object.properties = new_object_property_dict if object.required and len(object.required) > 0: required = [prefix + name for name in object.required] object.required = required return namespace_detail class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = 
params.pop('sort_dir', 'desc') if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, int(limit)) query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['public', 'private']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def show(self, request): params = request.params.copy() query_params = { 'filters': self._get_filters(params) } return query_params def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) return dict(namespace=namespace) def update(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) namespace = json.fromjson(Namespace, body) return dict(user_ns=namespace) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = 
schema def create(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, 201) response.location = get_namespace_href(namespace) def show(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response) def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) result.first = "/v2/metadefs/namespaces" result.schema = "/v2/schemas/metadefs/namespaces" if query: result.first = '%s?%s' % (result.first, query) if result.next: params['marker'] = result.next next_query = urlparse.urlencode(params) result.next = '/v2/metadefs/namespaces?%s' % next_query ns_json = json.tojson(Namespaces, result) response = self.__render(ns_json, response) def update(self, response, namespace): ns_json = json.tojson(Namespace, namespace) response = self.__render(ns_json, response, 200) def delete(self, response, result): response.status_int = 204 def delete_objects(self, response, result): response.status_int = 204 def delete_properties(self, response, result): response.status_int = 204 def __render(self, json_data, response, response_status=None): body = jsonutils.dumps(json_data, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' if response_status: response.status_int = response_status return response def _get_base_definitions(): return get_schema_definitions() def get_schema_definitions(): return { "positiveInteger": { "type": "integer", "minimum": 0 }, "positiveIntegerDefault0": { "allOf": [ {"$ref": "#/definitions/positiveInteger"}, {"default": 0} ] }, "stringArray": { "type": "array", "items": {"type": "string"}, # "minItems": 1, "uniqueItems": True }, "property": { "type": "object", "additionalProperties": { "type": "object", "required": ["title", "type"], "properties": { "name": { "type": "string" }, "title": { "type": "string" }, 
"description": { "type": "string" }, "operators": { "type": "array", "items": { "type": "string" } }, "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "required": { "$ref": "#/definitions/stringArray" }, "minimum": { "type": "number" }, "maximum": { "type": "number" }, "maxLength": { "$ref": "#/definitions/positiveInteger" }, "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, "pattern": { "type": "string", "format": "regex" }, "enum": { "type": "array" }, "readonly": { "type": "boolean" }, "default": {}, "items": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "array", "boolean", "integer", "number", "object", "string", None ] }, "enum": { "type": "array" } } }, "maxItems": { "$ref": "#/definitions/positiveInteger" }, "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, "uniqueItems": { "type": "boolean", "default": False }, "additionalItems": { "type": "boolean" }, } } } } def _get_base_properties(): return { "namespace": { "type": "string", "description": _("The unique namespace text."), "maxLength": 80, }, "display_name": { "type": "string", "description": _("The user friendly name for the namespace. 
Used " "by UI if available."), "maxLength": 80, }, "description": { "type": "string", "description": _("Provides a user friendly description of the " "namespace."), "maxLength": 500, }, "visibility": { "type": "string", "description": _("Scope of namespace accessibility."), "enum": ["public", "private"], }, "protected": { "type": "boolean", "description": _("If true, namespace will not be deletable."), }, "owner": { "type": "string", "description": _("Owner of the namespace."), "maxLength": 255, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of namespace creation"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last namespace" " modification"), "format": "date-time" }, "schema": { 'readOnly': True, "type": "string" }, "self": { 'readOnly': True, "type": "string" }, "resource_type_associations": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "prefix": { "type": "string" }, "properties_target": { "type": "string" } } } }, "properties": { "$ref": "#/definitions/property" }, "objects": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "description": { "type": "string" }, "required": { "$ref": "#/definitions/stringArray" }, "properties": { "$ref": "#/definitions/property" }, } } }, "tags": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } } } }, } def get_schema(): properties = _get_base_properties() definitions = _get_base_definitions() mandatory_attrs = Namespace.get_mandatory_attrs() schema = glance.schema.Schema( 'namespace', properties, required=mandatory_attrs, definitions=definitions ) return schema def get_collection_schema(): namespace_schema = get_schema() return glance.schema.CollectionSchema('namespaces', namespace_schema) def get_namespace_href(namespace): base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace 
return base_href def get_object_href(namespace_name, metadef_object): base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % (namespace_name, metadef_object.name)) return base_href def get_tag_href(namespace_name, metadef_tag): base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % (namespace_name, metadef_tag.name)) return base_href def create_resource(): """Namespaces resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = NamespaceController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/schemas.py0000664000567000056710000000755312701407047020526 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class Controller(object):
    """Serves the JSON schema documents for the v2 API resources.

    Each handler simply returns the pre-built schema for one resource
    (or resource collection); all schemas are constructed once at
    instantiation time.
    """

    def __init__(self, custom_image_properties=None):
        # Image and image-member schemas; image schemas may carry
        # operator-defined custom properties.
        self.image_schema = images.get_schema(custom_image_properties)
        self.image_collection_schema = images.get_collection_schema(
            custom_image_properties)
        self.member_schema = image_members.get_schema()
        self.member_collection_schema = image_members.get_collection_schema()

        # Task schemas.
        self.task_schema = tasks.get_task_schema()
        self.task_collection_schema = tasks.get_collection_schema()

        # Metadata-definition schemas.
        self.metadef_namespace_schema = metadef_namespaces.get_schema()
        self.metadef_namespace_collection_schema = (
            metadef_namespaces.get_collection_schema())
        self.metadef_resource_type_schema = metadef_resource_types.get_schema()
        self.metadef_resource_type_collection_schema = (
            metadef_resource_types.get_collection_schema())
        self.metadef_property_schema = metadef_properties.get_schema()
        self.metadef_property_collection_schema = (
            metadef_properties.get_collection_schema())
        self.metadef_object_schema = metadef_objects.get_schema()
        self.metadef_object_collection_schema = (
            metadef_objects.get_collection_schema())
        self.metadef_tag_schema = metadef_tags.get_schema()
        self.metadef_tag_collection_schema = (
            metadef_tags.get_collection_schema())

    # -- image schemas are exposed raw so custom properties survive --

    def image(self, req):
        return self.image_schema.raw()

    def images(self, req):
        return self.image_collection_schema.raw()

    # -- member and task schemas are exposed in minimal form --

    def member(self, req):
        return self.member_schema.minimal()

    def members(self, req):
        return self.member_collection_schema.minimal()

    def task(self, req):
        return self.task_schema.minimal()

    def tasks(self, req):
        return self.task_collection_schema.minimal()

    # -- metadata-definition schemas are exposed raw --

    def metadef_namespace(self, req):
        return self.metadef_namespace_schema.raw()

    def metadef_namespaces(self, req):
        return self.metadef_namespace_collection_schema.raw()

    def metadef_resource_type(self, req):
        return self.metadef_resource_type_schema.raw()

    def metadef_resource_types(self, req):
        return self.metadef_resource_type_collection_schema.raw()

    def metadef_property(self, req):
        return self.metadef_property_schema.raw()

    def metadef_properties(self, req):
        return self.metadef_property_collection_schema.raw()

    def metadef_object(self, req):
        return self.metadef_object_schema.raw()

    def metadef_objects(self, req):
        return self.metadef_object_collection_schema.raw()

    def metadef_tag(self, req):
        return self.metadef_tag_schema.raw()

    def metadef_tags(self, req):
        return self.metadef_tag_collection_schema.raw()
class MetadefTag(types.Base, wsme_utils.WSMEModelTransformer):
    """WSME model for a single metadata-definition tag."""

    name = wsme.wsattr(types.text, mandatory=True)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)


class MetadefTags(types.Base, wsme_utils.WSMEModelTransformer):
    """WSME model wrapping a list of MetadefTag entries."""

    tags = wsme.wsattr([MetadefTag], mandatory=False)
class ResourceTypeAssociation(types.Base, WSMEModelTransformer):
    """WSME model associating a metadef namespace with a resource type."""

    name = wsme.wsattr(types.text, mandatory=True)
    # Optional prefix prepended to property names when the namespace is
    # applied to this resource type (stripped again on lookup).
    prefix = wsme.wsattr(types.text, mandatory=False)
    # NOTE(review): semantics of properties_target are not visible here;
    # presumably selects which property collection of the resource is
    # targeted — confirm against the metadef API docs.
    properties_target = wsme.wsattr(types.text, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypeAssociation, self).__init__(**kwargs)


class ResourceTypeAssociations(types.Base, WSMEModelTransformer):
    """WSME model wrapping a list of ResourceTypeAssociation entries."""

    resource_type_associations = wsme.wsattr([ResourceTypeAssociation],
                                             mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypeAssociations, self).__init__(**kwargs)


class ResourceType(types.Base, WSMEModelTransformer):
    """WSME model for a single resource type."""

    name = wsme.wsattr(types.text, mandatory=True)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceType, self).__init__(**kwargs)


class ResourceTypes(types.Base, WSMEModelTransformer):
    """WSME model wrapping a list of ResourceType entries."""

    resource_types = wsme.wsattr([ResourceType], mandatory=False)

    def __init__(self, **kwargs):
        super(ResourceTypes, self).__init__(**kwargs)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import wsme from wsme import types class ItemType(types.Base): type = wsme.wsattr(types.text, mandatory=True) enum = wsme.wsattr([types.text], mandatory=False) _wsme_attr_order = ('type', 'enum') def __init__(self, **kwargs): super(ItemType, self).__init__(**kwargs) glance-12.0.0/glance/api/v2/model/metadef_namespace.py0000664000567000056710000000571512701407047023622 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
class Namespace(types.Base, WSMEModelTransformer):
    """WSME model for a metadata-definition namespace.

    Mirrors the v2 metadefs namespace representation, including the
    contained resource-type associations, properties, objects and tags.
    """

    # Base fields
    namespace = wsme.wsattr(types.text, mandatory=True)
    display_name = wsme.wsattr(types.text, mandatory=False)
    description = wsme.wsattr(types.text, mandatory=False)
    visibility = wsme.wsattr(types.text, mandatory=False)
    protected = wsme.wsattr(bool, mandatory=False)
    owner = wsme.wsattr(types.text, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    # Contained fields
    resource_type_associations = wsme.wsattr([ResourceTypeAssociation],
                                             mandatory=False)
    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)
    objects = wsme.wsattr([MetadefObject], mandatory=False)
    tags = wsme.wsattr([MetadefTag], mandatory=False)

    # Generated fields (URLs of this resource and its schema document)
    self = wsme.wsattr(types.text, mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        # Fix: the first argument was previously named 'cls', which
        # misleadingly suggested a classmethod; this is an ordinary
        # instance initializer.  The class-level 'self' wsattr above is a
        # namespace attribute and does not conflict with this parameter.
        super(Namespace, self).__init__(**kwargs)

    @staticmethod
    def to_model_properties(db_property_types):
        """Convert persisted JSON property schemas to PropertyType models.

        :param db_property_types: iterable of DB records, each with a
            ``name`` and a JSON-encoded ``schema`` attribute
        :returns: dict mapping property name -> PropertyType
        """
        property_types = {}
        for db_property_type in db_property_types:
            # Convert the persisted json schema to a dict of PropertyTypes
            property_type = json.fromjson(
                PropertyType, db_property_type.schema)
            property_type_name = db_property_type.name
            property_types[property_type_name] = property_type
        return property_types
class MetadefObject(types.Base, WSMEModelTransformer):
    """WSME model for a metadata-definition object within a namespace."""

    name = wsme.wsattr(types.text, mandatory=True)
    required = wsme.wsattr([types.text], mandatory=False)
    description = wsme.wsattr(types.text, mandatory=False)
    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)

    # Not using datetime since time format has to be
    # in oslo_utils.timeutils.isotime() format
    created_at = wsme.wsattr(types.text, mandatory=False)
    updated_at = wsme.wsattr(types.text, mandatory=False)

    # Generated fields (URLs of this resource and its schema document)
    self = wsme.wsattr(types.text, mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=False)

    def __init__(self, **kwargs):
        # Fix: the first argument was previously named 'cls', which
        # misleadingly suggested a classmethod; this is an ordinary
        # instance initializer.  The class-level 'self' wsattr above is a
        # namespace attribute and does not conflict with this parameter.
        super(MetadefObject, self).__init__(**kwargs)


class MetadefObjects(types.Base, WSMEModelTransformer):
    """WSME model wrapping a list of MetadefObject entries."""

    objects = wsme.wsattr([MetadefObject], mandatory=False)
    schema = wsme.wsattr(types.text, mandatory=True)

    def __init__(self, **kwargs):
        super(MetadefObjects, self).__init__(**kwargs)
class PropertyType(types.Base, WSMEModelTransformer):
    """WSME model for a single metadata-definition property (JSON Schema
    keywords plus Glance-specific fields)."""

    # When used in collection of PropertyTypes, name is a dictionary key
    # and not included as separate field.
    name = wsme.wsattr(types.text, mandatory=False)

    type = wsme.wsattr(types.text, mandatory=True)
    title = wsme.wsattr(types.text, mandatory=True)
    description = wsme.wsattr(types.text, mandatory=False)
    operators = wsme.wsattr([types.text], mandatory=False)
    default = wsme.wsattr(types.bytes, mandatory=False)
    readonly = wsme.wsattr(bool, mandatory=False)

    # fields for type = integer, number
    # (the original comment said "string"; minimum/maximum are the JSON
    # Schema numeric-bound keywords)
    minimum = wsme.wsattr(int, mandatory=False)
    maximum = wsme.wsattr(int, mandatory=False)
    # enum is valid for any type in JSON Schema
    enum = wsme.wsattr([types.text], mandatory=False)
    # fields for type = string
    # (the original comment said "integer, number"; pattern/minLength/
    # maxLength are the JSON Schema string-constraint keywords)
    pattern = wsme.wsattr(types.text, mandatory=False)
    minLength = wsme.wsattr(int, mandatory=False)
    maxLength = wsme.wsattr(int, mandatory=False)
    # NOTE(review): 'confidential' is not a standard JSON Schema keyword;
    # presumably a Glance extension — confirm.
    confidential = wsme.wsattr(bool, mandatory=False)

    # fields for type = array
    items = wsme.wsattr(ItemType, mandatory=False)
    uniqueItems = wsme.wsattr(bool, mandatory=False)
    minItems = wsme.wsattr(int, mandatory=False)
    maxItems = wsme.wsattr(int, mandatory=False)
    additionalItems = wsme.wsattr(bool, mandatory=False)

    def __init__(self, **kwargs):
        super(PropertyType, self).__init__(**kwargs)


class PropertyTypes(types.Base, WSMEModelTransformer):
    """WSME model mapping property name -> PropertyType."""

    properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)

    def __init__(self, **kwargs):
        super(PropertyTypes, self).__init__(**kwargs)
class ImageMembersController(object):
    """Handles REST requests for the members (shared tenants) of an image."""

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)

    def _get_member_repo(self, req, image):
        """Return the member repository for ``image``.

        :raises webob.exc.HTTPForbidden: if the gateway refuses access
            (e.g. public images have no members)
        """
        try:
            # For public images, a forbidden exception with message
            # "Public images do not have members" is thrown.
            return self.gateway.get_member_repo(image, req.context)
        except exception.Forbidden as e:
            msg = (_("Error fetching members of image %(image_id)s: "
                     "%(inner_msg)s") % {"image_id": image.image_id,
                                         "inner_msg": e.msg})
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _lookup_image(self, req, image_id):
        """Fetch an image, translating domain errors to HTTP errors."""
        image_repo = self.gateway.get_repo(req.context)
        try:
            return image_repo.get(image_id)
        except (exception.NotFound):
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("You are not authorized to lookup image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    def _lookup_member(self, req, image, member_id):
        """Fetch one member of ``image``, mapping errors to HTTP errors."""
        member_repo = self._get_member_repo(req, image)
        try:
            return member_repo.get(member_id)
        except (exception.NotFound):
            msg = (_("%(m_id)s not found in the member list of the image "
                     "%(i_id)s.") % {"m_id": member_id,
                                     "i_id": image.image_id})
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = (_("You are not authorized to lookup the members of the "
                     "image %s.") % image.image_id)
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)

    @utils.mutating
    def create(self, req, image_id, member_id):
        """Add a membership to the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member (tenant) identifier
        :returns: the new member entity (serialized with member_id,
            image_id, status, created_at, updated_at)
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        image_member_factory = self.gateway.get_image_member_factory(
            req.context)
        try:
            new_member = image_member_factory.new_image_member(image,
                                                               member_id)
            member_repo.add(new_member)
            return new_member
        except exception.Forbidden:
            msg = _("Not allowed to create members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Duplicate:
            msg = _("Member %(member_id)s is duplicated for image "
                    "%(image_id)s") % {"member_id": member_id,
                                       "image_id": image_id}
            LOG.warning(msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.ImageMemberLimitExceeded as e:
            msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)

    @utils.mutating
    def update(self, req, image_id, member_id, status):
        """Update the status of an existing image membership.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :param status: the new membership status
        :returns: the updated member entity
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member.status = status
            member_repo.save(member)
            return member
        except exception.Forbidden:
            msg = _("Not allowed to update members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except ValueError as e:
            msg = (_("Incorrect request: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def index(self, req, image_id):
        """Return the list of members of an image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :returns: ``{'members': [...]}``
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        members = []
        try:
            for member in member_repo.list():
                members.append(member)
        except exception.Forbidden:
            msg = _("Not allowed to list members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        return dict(members=members)

    def show(self, req, image_id, member_id):
        """Return one membership of the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        """
        try:
            image = self._lookup_image(req, image_id)
            return self._lookup_member(req, image, member_id)
        except webob.exc.HTTPForbidden as e:
            # Convert Forbidden to NotFound to prevent information
            # leakage.
            raise webob.exc.HTTPNotFound(explanation=e.explanation)

    @utils.mutating
    def delete(self, req, image_id, member_id):
        """Remove a membership from the image (204 on success)."""
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member_repo.remove(member)
            return webob.Response(body='', status=204)
        except exception.Forbidden:
            msg = _("Not allowed to delete members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Renders image-member entities as JSON HTTP responses."""

    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema or get_schema()

    def _format_image_member(self, member):
        """Build the schema-filtered dict view of a single member."""
        view = {}
        for key in ('member_id', 'image_id', 'status'):
            view[key] = getattr(member, key)
        view['created_at'] = timeutils.isotime(member.created_at)
        view['updated_at'] = timeutils.isotime(member.updated_at)
        view['schema'] = '/v2/schemas/member'
        return self.schema.filter(view)

    def _write_json(self, response, view):
        """Serialize ``view`` onto ``response`` as a JSON body."""
        body = jsonutils.dumps(view, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'

    def create(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))

    def update(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))

    def show(self, response, image_member):
        self._write_json(response, self._format_image_member(image_member))

    def index(self, response, image_members):
        views = [self._format_image_member(m)
                 for m in image_members['members']]
        totalview = dict(members=views)
        totalview['schema'] = '/v2/schemas/members'
        self._write_json(response, totalview)
class NamespacePropertiesController(object):
    """Handles REST requests for metadata-definition properties."""

    def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.gateway = glance.gateway.Gateway(db_api=self.db_api,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)

    def _to_dict(self, model_property_type):
        # Convert the model PropertyType to its persisted form: a dict
        # holding the name and the JSON-encoded schema.
        db_property_type_dict = dict()
        db_property_type_dict['schema'] = json.tojson(
            PropertyType, model_property_type)
        db_property_type_dict['name'] = model_property_type.name
        return db_property_type_dict

    def _to_model(self, db_property_type):
        # Convert the persisted json schema back into a PropertyType model.
        property_type = json.fromjson(
            PropertyType, db_property_type.schema)
        property_type.name = db_property_type.name
        return property_type

    def index(self, req, namespace):
        """List all properties of ``namespace`` as a PropertyTypes model."""
        try:
            filters = dict()
            filters['namespace'] = namespace
            prop_repo = self.gateway.get_metadef_property_repo(req.context)
            db_properties = prop_repo.list(filters=filters)
            property_list = Namespace.to_model_properties(db_properties)
            namespace_properties = PropertyTypes()
            namespace_properties.properties = property_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata properties "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return namespace_properties

    def show(self, req, namespace, property_name, filters=None):
        """Show one property; honors an optional resource_type prefix.

        :param filters: optional dict of query parameters; when it carries
            a ``resource_type`` key, that association's prefix is stripped
            from ``property_name`` before lookup.
        """
        try:
            # Fix: use .get() so a filters dict without 'resource_type'
            # (any other query parameter) no longer raises KeyError, which
            # the blanket handler below turned into a 500 error.
            if filters and filters.get('resource_type'):
                rs_repo = self.gateway.get_metadef_resource_type_repo(
                    req.context)
                db_resource_type = rs_repo.get(filters['resource_type'],
                                               namespace)
                prefix = db_resource_type.prefix
                if prefix and property_name.startswith(prefix):
                    property_name = property_name[len(prefix):]
                else:
                    msg = (_("Property %(property_name)s does not start "
                             "with the expected resource type association "
                             "prefix of '%(prefix)s'.")
                           % {'property_name': property_name,
                              'prefix': prefix})
                    raise exception.NotFound(msg)

            prop_repo = self.gateway.get_metadef_property_repo(req.context)
            db_property = prop_repo.get(namespace, property_name)
            property = self._to_model(db_property)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to show metadata property '%s' "
                      "within '%s' namespace", property_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return property

    def create(self, req, namespace, property_type):
        """Create a new property within ``namespace``."""
        prop_factory = self.gateway.get_metadef_property_factory(req.context)
        prop_repo = self.gateway.get_metadef_property_repo(req.context)
        try:
            new_property_type = prop_factory.new_namespace_property(
                namespace=namespace, **self._to_dict(property_type))
            prop_repo.add(new_property_type)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata property within "
                      "'%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return self._to_model(new_property_type)

    def update(self, req, namespace, property_name, property_type):
        """Replace the definition of an existing property."""
        prop_repo = self.gateway.get_metadef_property_repo(req.context)
        try:
            db_property_type = prop_repo.get(namespace, property_name)
            # Preserve the original name so the repo can detect renames.
            db_property_type._old_name = db_property_type.name
            db_property_type.name = property_type.name
            db_property_type.schema = (self._to_dict(property_type))['schema']
            updated_property_type = prop_repo.save(db_property_type)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to update metadata property '%s' "
                      "within '%s' namespace", property_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return self._to_model(updated_property_type)

    def delete(self, req, namespace, property_name):
        """Delete one property from ``namespace``."""
        prop_repo = self.gateway.get_metadef_property_repo(req.context)
        try:
            property_type = prop_repo.get(namespace, property_name)
            property_type.delete()
            prop_repo.remove(property_type)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata property '%s' "
                      "within '%s' namespace", property_name, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
'application/json' def index(self, response, result): property_type_json = json.tojson(PropertyTypes, result) body = jsonutils.dumps(property_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create(self, response, result): response.status_int = 201 self.show(response, result) def update(self, response, result): response.status_int = 200 self.show(response, result) def delete(self, response, result): response.status_int = 204 def _get_base_definitions(): return { "positiveInteger": { "type": "integer", "minimum": 0 }, "positiveIntegerDefault0": { "allOf": [ {"$ref": "#/definitions/positiveInteger"}, {"default": 0} ] }, "stringArray": { "type": "array", "items": {"type": "string"}, "minItems": 1, "uniqueItems": True } } def _get_base_properties(): base_def = namespaces.get_schema_definitions() return base_def['property']['additionalProperties']['properties'] def get_schema(): definitions = _get_base_definitions() properties = _get_base_properties() mandatory_attrs = PropertyType.get_mandatory_attrs() # name is required attribute when use as single property type mandatory_attrs.append('name') schema = glance.schema.Schema( 'property', properties, required=mandatory_attrs, definitions=definitions ) return schema def get_collection_schema(): namespace_properties_schema = get_schema() # Property name is a dict key and not a required attribute in # individual property schema inside property collections namespace_properties_schema.required.remove('name') return glance.schema.DictCollectionSchema('properties', namespace_properties_schema) def create_resource(): """NamespaceProperties resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = NamespacePropertiesController() return wsgi.Resource(controller, deserializer, serializer) 
glance-12.0.0/glance/api/v2/image_actions.py0000664000567000056710000000643212701407047021700 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from oslo_log import log as logging import webob.exc from glance.api import policy from glance.common import exception from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _LI import glance.notifier LOG = logging.getLogger(__name__) class ImageActionsController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @utils.mutating def deactivate(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) image.deactivate() image_repo.save(image) LOG.info(_LI("Image %s is deactivated"), image_id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to deactivate image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise 
webob.exc.HTTPBadRequest(explanation=e.msg) @utils.mutating def reactivate(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) image.reactivate() image_repo.save(image) LOG.info(_LI("Image %s is reactivated"), image_id) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to reactivate image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) class ResponseSerializer(wsgi.JSONResponseSerializer): def deactivate(self, response, result): response.status_int = 204 def reactivate(self, response, result): response.status_int = 204 def create_resource(): """Image data resource factory method""" deserializer = None serializer = ResponseSerializer() controller = ImageActionsController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/images.py0000664000567000056710000012016112701407047020337 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import glance_store from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_utils import encodeutils import six import six.moves.urllib.parse as urlparse import webob.exc from glance.api import policy from glance.common import exception from glance.common import location_strategy from glance.common import timeutils from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LW import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') CONF.import_opt('container_formats', 'glance.common.config', group='image_format') CONF.import_opt('show_multiple_locations', 'glance.common.config') class ImagesController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @utils.mutating def create(self, req, image, extra_properties, tags): image_factory = self.gateway.get_image_factory(req.context) image_repo = self.gateway.get_repo(req.context) try: image = image_factory.new_image(extra_properties=extra_properties, tags=tags, **image) image_repo.add(image) except (exception.DuplicateLocation, exception.Invalid) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except (exception.ReservedProperty, exception.ReadonlyProperty) as e: raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to create image") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.LimitExceeded as e: LOG.warn(encodeutils.exception_to_unicode(e)) raise 
webob.exc.HTTPRequestEntityTooLarge( explanation=e.msg, request=req, content_type='text/plain') except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) except TypeError as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e) return image def index(self, req, marker=None, limit=None, sort_key=None, sort_dir=None, filters=None, member_status='accepted'): sort_key = ['created_at'] if not sort_key else sort_key sort_dir = ['desc'] if not sort_dir else sort_dir result = {} if filters is None: filters = {} filters['deleted'] = False if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) image_repo = self.gateway.get_repo(req.context) try: images = image_repo.list(marker=marker, limit=limit, sort_key=sort_key, sort_dir=sort_dir, filters=filters, member_status=member_status) if len(images) != 0 and len(images) == limit: result['next_marker'] = images[-1].image_id except (exception.NotFound, exception.InvalidSortKey, exception.InvalidFilterRangeValue, exception.InvalidParameterValue, exception.InvalidFilterOperatorValue) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to retrieve images index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) result['images'] = images return result def show(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: return image_repo.get(image_id) except exception.Forbidden as e: LOG.debug("User not permitted to show image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) @utils.mutating 
def update(self, req, image_id, changes): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) for change in changes: change_method_name = '_do_%s' % change['op'] change_method = getattr(self, change_method_name) change_method(req, image, change) if changes: image_repo.save(image) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except (exception.Invalid, exception.BadStoreUri) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to update image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.StorageQuotaFull as e: msg = (_("Denying attempt to upload image because it exceeds the" " quota: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise webob.exc.HTTPRequestEntityTooLarge( explanation=msg, request=req, content_type='text/plain') except exception.LimitExceeded as e: LOG.exception(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPRequestEntityTooLarge( explanation=e.msg, request=req, content_type='text/plain') except exception.NotAuthenticated as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) return image def _do_replace(self, req, image, change): path = change['path'] path_root = path[0] value = change['value'] if path_root == 'locations' and value == []: msg = _("Cannot set locations to empty list.") raise webob.exc.HTTPForbidden(msg) elif path_root == 'locations' and value != []: self._do_replace_locations(image, value) elif path_root == 'owner' and req.context.is_admin == False: msg = _("Owner can't be updated by non admin.") raise webob.exc.HTTPForbidden(msg) else: if hasattr(image, path_root): setattr(image, path_root, value) elif path_root in image.extra_properties: image.extra_properties[path_root] = value else: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % path_root) def _do_add(self, req, image, change): path = change['path'] path_root = 
path[0] value = change['value'] json_schema_version = change.get('json_schema_version', 10) if path_root == 'locations': self._do_add_locations(image, path[1], value) else: if ((hasattr(image, path_root) or path_root in image.extra_properties) and json_schema_version == 4): msg = _("Property %s already present.") raise webob.exc.HTTPConflict(msg % path_root) if hasattr(image, path_root): setattr(image, path_root, value) else: image.extra_properties[path_root] = value def _do_remove(self, req, image, change): path = change['path'] path_root = path[0] if path_root == 'locations': try: self._do_remove_locations(image, path[1]) except exception.Forbidden as e: raise webob.exc.HTTPForbidden(e.msg) else: if hasattr(image, path_root): msg = _("Property %s may not be removed.") raise webob.exc.HTTPForbidden(msg % path_root) elif path_root in image.extra_properties: del image.extra_properties[path_root] else: msg = _("Property %s does not exist.") raise webob.exc.HTTPConflict(msg % path_root) @utils.mutating def delete(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) image.delete() image_repo.remove(image) except (glance_store.Forbidden, exception.Forbidden) as e: LOG.debug("User not permitted to delete image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) except (glance_store.NotFound, exception.NotFound) as e: msg = (_("Failed to find image %(image_id)s to delete") % {'image_id': image_id}) LOG.warn(msg) raise webob.exc.HTTPNotFound(explanation=msg) except glance_store.exceptions.InUseByStore as e: msg = (_("Image %(id)s could not be deleted " "because it is in use: %(exc)s") % {"id": image_id, "exc": e.msg}) LOG.warn(msg) raise webob.exc.HTTPConflict(explanation=msg) except glance_store.exceptions.HasSnapshot as e: raise webob.exc.HTTPConflict(explanation=e.msg) except exception.InvalidImageStatusTransition as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotAuthenticated 
as e: raise webob.exc.HTTPUnauthorized(explanation=e.msg) def _get_locations_op_pos(self, path_pos, max_pos, allow_max): if path_pos is None or max_pos is None: return None pos = max_pos if allow_max else max_pos - 1 if path_pos.isdigit(): pos = int(path_pos) elif path_pos != '-': return None if not (allow_max or 0 <= pos < max_pos): return None return pos def _do_replace_locations(self, image, value): if CONF.show_multiple_locations == False: msg = _("It's not allowed to update locations if locations are " "invisible.") raise webob.exc.HTTPForbidden(explanation=msg) try: # NOTE(flwang): _locations_proxy's setattr method will check if # the update is acceptable. image.locations = value if image.status == 'queued': image.status = 'active' except (exception.BadStoreUri, exception.DuplicateLocation) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except ValueError as ve: # update image status failed. raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(ve)) def _do_add_locations(self, image, path_pos, value): if CONF.show_multiple_locations == False: msg = _("It's not allowed to add locations if locations are " "invisible.") raise webob.exc.HTTPForbidden(explanation=msg) pos = self._get_locations_op_pos(path_pos, len(image.locations), True) if pos is None: msg = _("Invalid position for adding a location.") raise webob.exc.HTTPBadRequest(explanation=msg) try: image.locations.insert(pos, value) if image.status == 'queued': image.status = 'active' except (exception.BadStoreUri, exception.DuplicateLocation) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except ValueError as e: # update image status failed. 
raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) def _do_remove_locations(self, image, path_pos): if CONF.show_multiple_locations == False: msg = _("It's not allowed to remove locations if locations are " "invisible.") raise webob.exc.HTTPForbidden(explanation=msg) if len(image.locations) == 1: LOG.debug("User forbidden to remove last location of image %s", image.image_id) msg = _("Cannot remove last location in the image.") raise exception.Forbidden(msg) pos = self._get_locations_op_pos(path_pos, len(image.locations), False) if pos is None: msg = _("Invalid position for removing a location.") raise webob.exc.HTTPBadRequest(explanation=msg) try: # NOTE(zhiyan): this actually deletes the location # from the backend store. image.locations.pop(pos) # TODO(jokke): Fix this, we should catch what store throws and # provide definitely something else than IternalServerError to user. except Exception as e: raise webob.exc.HTTPInternalServerError( explanation=encodeutils.exception_to_unicode(e)) class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ('direct_url', 'self', 'file', 'schema') _readonly_properties = ('created_at', 'updated_at', 'status', 'checksum', 'size', 'virtual_size', 'direct_url', 'self', 'file', 'schema', 'id') _reserved_properties = ('location', 'deleted', 'deleted_at') _base_properties = ('checksum', 'created_at', 'container_format', 'disk_format', 'id', 'min_disk', 'min_ram', 'name', 'size', 'virtual_size', 'status', 'tags', 'owner', 'updated_at', 'visibility', 'protected') _available_sort_keys = ('name', 'status', 'container_format', 'disk_format', 'size', 'id', 'created_at', 'updated_at') _default_sort_key = 'created_at' _default_sort_dir = 'desc' _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} _supported_operations = ('add', 'remove', 'replace') def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def 
_get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden( explanation=six.text_type(msg)) def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) image = {} properties = body tags = properties.pop('tags', []) for key in self._base_properties: try: # NOTE(flwang): Instead of changing the _check_unexpected # of ImageFactory. It would be better to do the mapping # at here. if key == 'id': image['image_id'] = properties.pop(key) else: image[key] = properties.pop(key) except KeyError: pass return dict(image=image, extra_properties=properties, tags=tags) def _get_change_operation_d10(self, raw_change): op = raw_change.get('op') if op is None: msg = (_('Unable to find `op` in JSON Schema change. ' 'It must be one of the following: %(available)s.') % {'available': ', '.join(self._supported_operations)}) raise webob.exc.HTTPBadRequest(explanation=msg) if op not in self._supported_operations: msg = (_('Invalid operation: `%(op)s`. 
' 'It must be one of the following: %(available)s.') % {'op': op, 'available': ', '.join(self._supported_operations)}) raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_operation_d4(self, raw_change): op = None for key in self._supported_operations: if key in raw_change: if op is not None: msg = _('Operation objects must contain only one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) op = key if op is None: msg = _('Operation objects must contain exactly one member' ' named "add", "remove", or "replace".') raise webob.exc.HTTPBadRequest(explanation=msg) return op def _get_change_path_d10(self, raw_change): try: return raw_change['path'] except KeyError: msg = _("Unable to find '%s' in JSON Schema change") % 'path' raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_path_d4(self, raw_change, op): return raw_change[op] def _decode_json_pointer(self, pointer): """Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0". """ self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret def _validate_json_pointer(self, pointer): """Validate a json pointer. We only accept a limited form of json pointers. 
""" if not pointer.startswith('/'): msg = _('Pointer `%s` does not start with "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if re.search('/\s*?/', pointer[1:]): msg = _('Pointer `%s` contains adjacent "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if len(pointer) > 1 and pointer.endswith('/'): msg = _('Pointer `%s` end with "/".') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if pointer[1:].strip() == '/': msg = _('Pointer `%s` does not contains valid token.') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) if re.search('~[^01]', pointer) or pointer.endswith('~'): msg = _('Pointer `%s` contains "~" not part of' ' a recognized escape sequence.') % pointer raise webob.exc.HTTPBadRequest(explanation=msg) def _get_change_value(self, raw_change, op): if 'value' not in raw_change: msg = _('Operation "%s" requires a member named "value".') raise webob.exc.HTTPBadRequest(explanation=msg % op) return raw_change['value'] def _validate_change(self, change): path_root = change['path'][0] if path_root in self._readonly_properties: msg = _("Attribute '%s' is read-only.") % path_root raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) if path_root in self._reserved_properties: msg = _("Attribute '%s' is reserved.") % path_root raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) if change['op'] == 'remove': return partial_image = None if len(change['path']) == 1: partial_image = {path_root: change['value']} elif ((path_root in get_base_properties().keys()) and (get_base_properties()[path_root].get('type', '') == 'array')): # NOTE(zhiyan): cient can use PATCH API to adding element to # the image's existing set property directly. # Such as: 1. using '/locations/N' path to adding a location # to the image's 'locations' list at N position. # (implemented) # 2. using '/tags/-' path to appending a tag to the # image's 'tags' list at last. 
(Not implemented) partial_image = {path_root: [change['value']]} if partial_image: try: self.schema.validate(partial_image) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) def _validate_path(self, op, path): path_root = path[0] limits = self._path_depth_limits.get(path_root, {}) if len(path) != limits.get(op, 1): msg = _("Invalid JSON pointer for this resource: " "'/%s'") % '/'.join(path) raise webob.exc.HTTPBadRequest(explanation=six.text_type(msg)) def _parse_json_schema_change(self, raw_change, draft_version): if draft_version == 10: op = self._get_change_operation_d10(raw_change) path = self._get_change_path_d10(raw_change) elif draft_version == 4: op = self._get_change_operation_d4(raw_change) path = self._get_change_path_d4(raw_change, op) else: msg = _('Unrecognized JSON Schema draft version') raise webob.exc.HTTPBadRequest(explanation=msg) path_list = self._decode_json_pointer(path) return op, path_list def update(self, request): changes = [] content_types = { 'application/openstack-images-v2.0-json-patch': 4, 'application/openstack-images-v2.1-json-patch': 10, } if request.content_type not in content_types: headers = {'Accept-Patch': ', '.join(sorted(content_types.keys()))} raise webob.exc.HTTPUnsupportedMediaType(headers=headers) json_schema_version = content_types[request.content_type] body = self._get_request_body(request) if not isinstance(body, list): msg = _('Request body must be a JSON array of operation objects.') raise webob.exc.HTTPBadRequest(explanation=msg) for raw_change in body: if not isinstance(raw_change, dict): msg = _('Operations must be JSON objects.') raise webob.exc.HTTPBadRequest(explanation=msg) (op, path) = self._parse_json_schema_change(raw_change, json_schema_version) # NOTE(zhiyan): the 'path' is a list. 
self._validate_path(op, path) change = {'op': op, 'path': path, 'json_schema_version': json_schema_version} if not op == 'remove': change['value'] = self._get_change_value(raw_change, op) self._validate_change(change) changes.append(change) return {'changes': changes} def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_sort_key(self, sort_key): if sort_key not in self._available_sort_keys: msg = _('Invalid sort key: %(sort_key)s. ' 'It must be one of the following: %(available)s.') % ( {'sort_key': sort_key, 'available': ', '.join(self._available_sort_keys)}) raise webob.exc.HTTPBadRequest(explanation=msg) return sort_key def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _validate_member_status(self, member_status): if member_status not in ['pending', 'accepted', 'rejected', 'all']: msg = _('Invalid status: %s') % member_status raise webob.exc.HTTPBadRequest(explanation=msg) return member_status def _get_filters(self, filters): visibility = filters.get('visibility') if visibility: if visibility not in ['public', 'private', 'shared']: msg = _('Invalid visibility value: %s') % visibility raise webob.exc.HTTPBadRequest(explanation=msg) changes_since = filters.get('changes-since', None) if changes_since: msg = _('The "changes-since" filter is no longer available on v2.') raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _get_sorting_params(self, params): """ Process sorting params. Currently glance supports two sorting syntax: classic and new one, that is uniform for all OpenStack projects. 
Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc New syntax: sort=name:asc,size:desc """ sort_keys = [] sort_dirs = [] if 'sort' in params: # use new sorting syntax here if 'sort_key' in params or 'sort_dir' in params: msg = _('Old and new sorting syntax cannot be combined') raise webob.exc.HTTPBadRequest(explanation=msg) for sort_param in params.pop('sort').strip().split(','): key, _sep, dir = sort_param.partition(':') if not dir: dir = self._default_sort_dir sort_keys.append(self._validate_sort_key(key.strip())) sort_dirs.append(self._validate_sort_dir(dir.strip())) else: # continue with classic syntax # NOTE(mfedosin): we have 3 options here: # 1. sort_dir wasn't passed: we use default one - 'desc'. # 2. Only one sort_dir was passed: use it for every sort_key # in the list. # 3. Multiple sort_dirs were passed: consistently apply each one to # the corresponding sort_key. # If number of sort_dirs and sort_keys doesn't match then raise an # exception. while 'sort_key' in params: sort_keys.append(self._validate_sort_key( params.pop('sort_key').strip())) while 'sort_dir' in params: sort_dirs.append(self._validate_sort_dir( params.pop('sort_dir').strip())) if sort_dirs: dir_len = len(sort_dirs) key_len = len(sort_keys) if dir_len > 1 and dir_len != key_len: msg = _('Number of sort dirs does not match the number ' 'of sort keys') raise webob.exc.HTTPBadRequest(explanation=msg) if not sort_keys: sort_keys = [self._default_sort_key] if not sort_dirs: sort_dirs = [self._default_sort_dir] return sort_keys, sort_dirs def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) member_status = params.pop('member_status', 'accepted') # NOTE (flwang) To avoid using comma or any predefined chars to split # multiple tags, now we allow user specify multiple 'tag' parameters # in URL, such as v2/images?tag=x86&tag=64bit. 
tags = [] while 'tag' in params: tags.append(params.pop('tag').strip()) query_params = { 'filters': self._get_filters(params), 'member_status': self._validate_member_status(member_status), } if marker is not None: query_params['marker'] = marker if limit is not None: query_params['limit'] = self._validate_limit(limit) if tags: query_params['filters']['tags'] = tags # NOTE(mfedosin): param is still called sort_key and sort_dir, # instead of sort_keys and sort_dirs respectively. # It's done because in v1 it's still a single value. query_params['sort_key'], query_params['sort_dir'] = ( self._get_sorting_params(params)) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema or get_schema() def _get_image_href(self, image, subcollection=''): base_href = '/v2/images/%s' % image.image_id if subcollection: base_href = '%s/%s' % (base_href, subcollection) return base_href def _format_image(self, image): def _get_image_locations(image): try: return list(image.locations) except exception.Forbidden: return [] try: image_view = dict(image.extra_properties) attributes = ['name', 'disk_format', 'container_format', 'visibility', 'size', 'virtual_size', 'status', 'checksum', 'protected', 'min_ram', 'min_disk', 'owner'] for key in attributes: image_view[key] = getattr(image, key) image_view['id'] = image.image_id image_view['created_at'] = timeutils.isotime(image.created_at) image_view['updated_at'] = timeutils.isotime(image.updated_at) if CONF.show_multiple_locations: locations = _get_image_locations(image) if locations: image_view['locations'] = [] for loc in locations: tmp = dict(loc) tmp.pop('id', None) tmp.pop('status', None) image_view['locations'].append(tmp) else: # NOTE (flwang): We will still show "locations": [] if # image.locations is None to indicate it's allowed to show # locations but it's just non-existent. 
image_view['locations'] = [] LOG.debug("There is not available location " "for image %s", image.image_id) if CONF.show_image_direct_url: locations = _get_image_locations(image) if locations: # Choose best location configured strategy l = location_strategy.choose_best_location(locations) image_view['direct_url'] = l['url'] else: LOG.debug("There is not available location " "for image %s", image.image_id) image_view['tags'] = list(image.tags) image_view['self'] = self._get_image_href(image) image_view['file'] = self._get_image_href(image, 'file') image_view['schema'] = '/v2/schemas/image' image_view = self.schema.filter(image_view) # domain return image_view except exception.Forbidden as e: raise webob.exc.HTTPForbidden(explanation=e.msg) def create(self, response, image): response.status_int = 201 self.show(response, image) response.location = self._get_image_href(image) def show(self, response, image): image_view = self._format_image(image) body = json.dumps(image_view, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def update(self, response, image): image_view = self._format_image(image) body = json.dumps(image_view, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) body = { 'images': [self._format_image(i) for i in result['images']], 'first': '/v2/images', 'schema': '/v2/schemas/images', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urlparse.urlencode(params) body['next'] = '/v2/images?%s' % next_query response.unicode_body = six.text_type(json.dumps(body, ensure_ascii=False)) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def get_base_properties(): return { 
'id': { 'type': 'string', 'description': _('An identifier for the image'), 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), }, 'name': { 'type': ['null', 'string'], 'description': _('Descriptive name for the image'), 'maxLength': 255, }, 'status': { 'type': 'string', 'readOnly': True, 'description': _('Status of the image'), 'enum': ['queued', 'saving', 'active', 'killed', 'deleted', 'pending_delete', 'deactivated'], }, 'visibility': { 'type': 'string', 'description': _('Scope of image accessibility'), 'enum': ['public', 'private'], }, 'protected': { 'type': 'boolean', 'description': _('If true, image will not be deletable.'), }, 'checksum': { 'type': ['null', 'string'], 'readOnly': True, 'description': _('md5 hash of image contents.'), 'maxLength': 32, }, 'owner': { 'type': ['null', 'string'], 'description': _('Owner of the image'), 'maxLength': 255, }, 'size': { 'type': ['null', 'integer'], 'readOnly': True, 'description': _('Size of image file in bytes'), }, 'virtual_size': { 'type': ['null', 'integer'], 'readOnly': True, 'description': _('Virtual size of image in bytes'), }, 'container_format': { 'type': ['null', 'string'], 'description': _('Format of the container'), 'enum': [None] + CONF.image_format.container_formats, }, 'disk_format': { 'type': ['null', 'string'], 'description': _('Format of the disk'), 'enum': [None] + CONF.image_format.disk_formats, }, 'created_at': { 'type': 'string', 'readOnly': True, 'description': _('Date and time of image registration' ), # TODO(bcwaldon): our jsonschema library doesn't seem to like the # format attribute, figure out why! 
# 'format': 'date-time', }, 'updated_at': { 'type': 'string', 'readOnly': True, 'description': _('Date and time of the last image modification' ), # 'format': 'date-time', }, 'tags': { 'type': 'array', 'description': _('List of strings related to the image'), 'items': { 'type': 'string', 'maxLength': 255, }, }, 'direct_url': { 'type': 'string', 'readOnly': True, 'description': _('URL to access the image file kept in external ' 'store'), }, 'min_ram': { 'type': 'integer', 'description': _('Amount of ram (in MB) required to boot image.'), }, 'min_disk': { 'type': 'integer', 'description': _('Amount of disk space (in GB) required to boot ' 'image.'), }, 'self': { 'type': 'string', 'readOnly': True, 'description': _('An image self url'), }, 'file': { 'type': 'string', 'readOnly': True, 'description': _('An image file url'), }, 'schema': { 'type': 'string', 'readOnly': True, 'description': _('An image schema url'), }, 'locations': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'url': { 'type': 'string', 'maxLength': 255, }, 'metadata': { 'type': 'object', }, }, 'required': ['url', 'metadata'], }, 'description': _('A set of URLs to access the image file kept in ' 'external store'), }, } def _get_base_links(): return [ {'rel': 'self', 'href': '{self}'}, {'rel': 'enclosure', 'href': '{file}'}, {'rel': 'describedby', 'href': '{schema}'}, ] def get_schema(custom_properties=None): properties = get_base_properties() links = _get_base_links() if CONF.allow_additional_image_properties: schema = glance.schema.PermissiveSchema('image', properties, links) else: schema = glance.schema.Schema('image', properties) if custom_properties: for property_value in custom_properties.values(): property_value['is_base'] = False schema.merge_properties(custom_properties) return schema def get_collection_schema(custom_properties=None): image_schema = get_schema(custom_properties) return glance.schema.CollectionSchema('images', image_schema) def load_custom_properties(): """Find 
the schema properties files and load them into a dict.""" filename = 'schema-image.json' match = CONF.find_file(filename) if match: with open(match, 'r') as schema_file: schema_data = schema_file.read() return json.loads(schema_data) else: msg = (_LW('Could not find schema properties file %s. Continuing ' 'without custom properties') % filename) LOG.warn(msg) return {} def create_resource(custom_properties=None): """Images resource factory method""" schema = get_schema(custom_properties) deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ImagesController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/tasks.py0000664000567000056710000003561512701407047020230 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # All Rights Reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import debtcollector import glance_store from oslo_config import cfg from oslo_log import log as logging import oslo_serialization.jsonutils as json from oslo_utils import encodeutils from oslo_utils import uuidutils import six import six.moves.urllib.parse as urlparse import webob.exc from glance.api import common from glance.api import policy from glance.common import exception from glance.common import timeutils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LW import glance.notifier import glance.schema LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('task_time_to_live', 'glance.common.config', group='task') _DEPRECATION_MESSAGE = ("The task API is being deprecated and " "it will be superseded by the new image import " "API. Please refer to this link for more " "information about the aforementioned process: " "https://specs.openstack.org/openstack/glance-specs/" "specs/mitaka/approved/image-import/" "image-import-refactor.html") class TasksController(object): """Manages operations on tasks.""" def __init__(self, db_api=None, policy_enforcer=None, notifier=None, store_api=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.store_api = store_api or glance_store self.gateway = glance.gateway.Gateway(self.db_api, self.store_api, self.notifier, self.policy) @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def create(self, req, task): task_factory = self.gateway.get_task_factory(req.context) executor_factory = self.gateway.get_task_executor_factory(req.context) task_repo = self.gateway.get_task_repo(req.context) try: new_task = task_factory.new_task(task_type=task['type'], owner=req.context.owner, task_input=task['input']) task_repo.add(new_task) task_executor = executor_factory.new_task_executor(req.context) pool = common.get_thread_pool("tasks_eventlet_pool") 
pool.spawn_n(new_task.run, task_executor) except exception.Forbidden as e: msg = (_LW("Forbidden to create task. Reason: %(reason)s") % {'reason': encodeutils.exception_to_unicode(e)}) LOG.warn(msg) raise webob.exc.HTTPForbidden(explanation=e.msg) return new_task @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def index(self, req, marker=None, limit=None, sort_key='created_at', sort_dir='desc', filters=None): result = {} if filters is None: filters = {} filters['deleted'] = False if limit is None: limit = CONF.limit_param_default limit = min(CONF.api_limit_max, limit) task_repo = self.gateway.get_task_stub_repo(req.context) try: tasks = task_repo.list(marker, limit, sort_key, sort_dir, filters) if len(tasks) != 0 and len(tasks) == limit: result['next_marker'] = tasks[-1].task_id except (exception.NotFound, exception.InvalidSortKey, exception.InvalidFilterRangeValue) as e: LOG.warn(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.warn(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPForbidden(explanation=e.msg) result['tasks'] = tasks return result @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def get(self, req, task_id): try: task_repo = self.gateway.get_task_repo(req.context) task = task_repo.get(task_id) except exception.NotFound as e: msg = (_LW("Failed to find task %(task_id)s. Reason: %(reason)s") % {'task_id': task_id, 'reason': encodeutils.exception_to_unicode(e)}) LOG.warn(msg) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: msg = (_LW("Forbidden to get task %(task_id)s. Reason:" " %(reason)s") % {'task_id': task_id, 'reason': encodeutils.exception_to_unicode(e)}) LOG.warn(msg) raise webob.exc.HTTPForbidden(explanation=e.msg) return task @debtcollector.removals.remove(message=_DEPRECATION_MESSAGE) def delete(self, req, task_id): msg = (_("This operation is currently not permitted on Glance Tasks. 
" "They are auto deleted after reaching the time based on " "their expires_at property.")) raise webob.exc.HTTPMethodNotAllowed(explanation=msg, headers={'Allow': 'GET'}, body_template='${explanation}') class RequestDeserializer(wsgi.JSONRequestDeserializer): _required_properties = ['type', 'input'] def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] def _validate_sort_dir(self, sort_dir): if sort_dir not in ['asc', 'desc']: msg = _('Invalid sort direction: %s') % sort_dir raise webob.exc.HTTPBadRequest(explanation=msg) return sort_dir def _get_filters(self, filters): status = filters.get('status') if status: if status not in ['pending', 'processing', 'success', 'failure']: msg = _('Invalid status value: %s') % status raise webob.exc.HTTPBadRequest(explanation=msg) type = filters.get('type') if type: if type not in ['import']: msg = _('Invalid type value: %s') % type raise webob.exc.HTTPBadRequest(explanation=msg) return filters def _validate_marker(self, marker): if marker and not uuidutils.is_uuid_like(marker): msg = _('Invalid marker format') raise webob.exc.HTTPBadRequest(explanation=msg) return marker def _validate_limit(self, limit): try: limit = int(limit) except ValueError: msg = _("limit param must be an integer") raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _("limit param must be positive") raise webob.exc.HTTPBadRequest(explanation=msg) return limit def _validate_create_body(self, body): """Validate the body of task creating request""" for param in self._required_properties: if param not in body: msg = _("Task '%s' is required") % param raise webob.exc.HTTPBadRequest(explanation=msg) def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_task_schema() def create(self, request): body = 
self._get_request_body(request) self._validate_create_body(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) task = {} properties = body for key in self._required_properties: try: task[key] = properties.pop(key) except KeyError: pass return dict(task=task) def index(self, request): params = request.params.copy() limit = params.pop('limit', None) marker = params.pop('marker', None) sort_dir = params.pop('sort_dir', 'desc') query_params = { 'sort_key': params.pop('sort_key', 'created_at'), 'sort_dir': self._validate_sort_dir(sort_dir), 'filters': self._get_filters(params) } if marker is not None: query_params['marker'] = self._validate_marker(marker) if limit is not None: query_params['limit'] = self._validate_limit(limit) return query_params class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, task_schema=None, partial_task_schema=None): super(ResponseSerializer, self).__init__() self.task_schema = task_schema or get_task_schema() self.partial_task_schema = (partial_task_schema or _get_partial_task_schema()) def _inject_location_header(self, response, task): location = self._get_task_location(task) response.headers['Location'] = location.encode('utf-8') def _get_task_location(self, task): return '/v2/tasks/%s' % task.task_id def _format_task(self, schema, task): task_view = { 'id': task.task_id, 'input': task.task_input, 'type': task.type, 'status': task.status, 'owner': task.owner, 'message': task.message, 'result': task.result, 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'self': self._get_task_location(task), 'schema': '/v2/schemas/task' } if task.expires_at: task_view['expires_at'] = timeutils.isotime(task.expires_at) task_view = schema.filter(task_view) # domain return task_view def _format_task_stub(self, schema, task): task_view = { 'id': task.task_id, 'type': task.type, 'status': task.status, 'owner': 
task.owner, 'created_at': timeutils.isotime(task.created_at), 'updated_at': timeutils.isotime(task.updated_at), 'self': self._get_task_location(task), 'schema': '/v2/schemas/task' } if task.expires_at: task_view['expires_at'] = timeutils.isotime(task.expires_at) task_view = schema.filter(task_view) # domain return task_view def create(self, response, task): response.status_int = 201 self._inject_location_header(response, task) self.get(response, task) def get(self, response, task): task_view = self._format_task(self.task_schema, task) body = json.dumps(task_view, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def index(self, response, result): params = dict(response.request.params) params.pop('marker', None) query = urlparse.urlencode(params) body = { 'tasks': [self._format_task_stub(self.partial_task_schema, task) for task in result['tasks']], 'first': '/v2/tasks', 'schema': '/v2/schemas/tasks', } if query: body['first'] = '%s?%s' % (body['first'], query) if 'next_marker' in result: params['marker'] = result['next_marker'] next_query = urlparse.urlencode(params) body['next'] = '/v2/tasks?%s' % next_query response.unicode_body = six.text_type(json.dumps(body, ensure_ascii=False)) response.content_type = 'application/json' _TASK_SCHEMA = { "id": { "description": _("An identifier for the task"), "pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), "type": "string" }, "type": { "description": _("The type of task represented by this content"), "enum": [ "import", ], "type": "string" }, "status": { "description": _("The current status of this task"), "enum": [ "pending", "processing", "success", "failure" ], "type": "string" }, "input": { "description": _("The parameters required by task, JSON blob"), "type": ["null", "object"], }, "result": { "description": _("The result of current task, JSON blob"), "type": ["null", "object"], }, "owner": { "description": 
_("An identifier for the owner of this task"), "type": "string" }, "message": { "description": _("Human-readable informative message only included" " when appropriate (usually on failure)"), "type": "string", }, "expires_at": { "description": _("Datetime when this resource would be" " subject to removal"), "type": ["null", "string"] }, "created_at": { "description": _("Datetime when this resource was created"), "type": "string" }, "updated_at": { "description": _("Datetime when this resource was updated"), "type": "string" }, 'self': { 'readOnly': True, 'type': 'string' }, 'schema': { 'readOnly': True, 'type': 'string' } } def get_task_schema(): properties = copy.deepcopy(_TASK_SCHEMA) schema = glance.schema.Schema('task', properties) return schema def _get_partial_task_schema(): properties = copy.deepcopy(_TASK_SCHEMA) hide_properties = ['input', 'result', 'message'] for key in hide_properties: del properties[key] schema = glance.schema.Schema('task', properties) return schema def get_collection_schema(): task_schema = _get_partial_task_schema() return glance.schema.CollectionSchema('tasks', task_schema) def create_resource(): """Task resource factory method""" task_schema = get_task_schema() partial_task_schema = _get_partial_task_schema() deserializer = RequestDeserializer(task_schema) serializer = ResponseSerializer(task_schema, partial_task_schema) controller = TasksController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v2/image_data.py0000664000567000056710000003342712701407047021155 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import webob.exc import glance.api.policy from glance.common import exception from glance.common import trust_auth from glance.common import utils from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _, _LE, _LI import glance.notifier LOG = logging.getLogger(__name__) CONF = cfg.CONF class ImageDataController(object): def __init__(self, db_api=None, store_api=None, policy_enforcer=None, notifier=None, gateway=None): if gateway is None: db_api = db_api or glance.db.get_api() store_api = store_api or glance_store policy = policy_enforcer or glance.api.policy.Enforcer() notifier = notifier or glance.notifier.Notifier() gateway = glance.gateway.Gateway(db_api, store_api, notifier, policy) self.gateway = gateway def _restore(self, image_repo, image): """ Restore the image to queued status. :param image_repo: The instance of ImageRepo :param image: The image will be restored """ try: if image_repo and image: image.status = 'queued' image_repo.save(image) except Exception as e: msg = (_LE("Unable to restore image %(image_id)s: %(e)s") % {'image_id': image.image_id, 'e': encodeutils.exception_to_unicode(e)}) LOG.exception(msg) def _delete(self, image_repo, image): """Delete the image. 
:param image_repo: The instance of ImageRepo :param image: The image that will be deleted """ try: if image_repo and image: image.status = 'killed' image_repo.save(image) except Exception as e: msg = (_LE("Unable to delete image %(image_id)s: %(e)s") % {'image_id': image.image_id, 'e': encodeutils.exception_to_unicode(e)}) LOG.exception(msg) @utils.mutating def upload(self, req, image_id, data, size): image_repo = self.gateway.get_repo(req.context) image = None refresher = None cxt = req.context try: image = image_repo.get(image_id) image.status = 'saving' try: if CONF.data_api == 'glance.db.registry.api': # create a trust if backend is registry try: # request user pluging for current token user_plugin = req.environ.get('keystone.token_auth') roles = [] # use roles from request environment because they # are not transformed to lower-case unlike cxt.roles for role_info in req.environ.get( 'keystone.token_info')['token']['roles']: roles.append(role_info['name']) refresher = trust_auth.TokenRefresher(user_plugin, cxt.tenant, roles) except Exception as e: LOG.info(_LI("Unable to create trust: %s " "Use the existing user token."), encodeutils.exception_to_unicode(e)) image_repo.save(image) image.set_data(data, size) try: image_repo.save(image, from_state='saving') except exception.NotAuthenticated as e: if refresher is not None: # request a new token to update an image in database cxt.auth_token = refresher.refresh_token() image_repo = self.gateway.get_repo(req.context) image_repo.save(image, from_state='saving') else: raise e try: # release resources required for re-auth if refresher is not None: refresher.release_resources() except Exception as e: LOG.info(_LI("Unable to delete trust %(trust)s: %(msg)s"), {"trust": refresher.trust_id, "msg": encodeutils.exception_to_unicode(e)}) except (glance_store.NotFound, exception.ImageNotFound, exception.Conflict): msg = (_("Image %s could not be found after upload. 
" "The image may have been deleted during the " "upload, cleaning up the chunks uploaded.") % image_id) LOG.warn(msg) # NOTE(sridevi): Cleaning up the uploaded chunks. try: image.delete() except exception.ImageNotFound: # NOTE(sridevi): Ignore this exception pass raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except exception.NotAuthenticated: msg = (_("Authentication error - the token may have " "expired during file upload. Deleting image data for " "%s.") % image_id) LOG.debug(msg) try: image.delete() except exception.NotAuthenticated: # NOTE: Ignore this exception pass raise webob.exc.HTTPUnauthorized(explanation=msg, request=req, content_type='text/plain') except ValueError as e: LOG.debug("Cannot save data for image %(id)s: %(e)s", {'id': image_id, 'e': encodeutils.exception_to_unicode(e)}) self._restore(image_repo, image) raise webob.exc.HTTPBadRequest( explanation=encodeutils.exception_to_unicode(e)) except glance_store.StoreAddDisabled: msg = _("Error in store configuration. 
Adding images to store " "is disabled.") LOG.exception(msg) self._restore(image_repo, image) raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except exception.InvalidImageStatusTransition as e: msg = encodeutils.exception_to_unicode(e) LOG.exception(msg) raise webob.exc.HTTPConflict(explanation=e.msg, request=req) except exception.Forbidden as e: msg = ("Not allowed to upload image data for image %s" % image_id) LOG.debug(msg) raise webob.exc.HTTPForbidden(explanation=msg, request=req) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except glance_store.StorageFull as e: msg = _("Image storage media " "is full: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.StorageQuotaFull as e: msg = _("Image exceeds the storage " "quota: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except exception.ImageSizeLimitExceeded as e: msg = _("The incoming image is " "too large: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) except glance_store.StorageWriteDenied as e: msg = _("Insufficient permissions on image " "storage media: %s") % encodeutils.exception_to_unicode(e) LOG.error(msg) self._restore(image_repo, image) raise webob.exc.HTTPServiceUnavailable(explanation=msg, request=req) except exception.SignatureVerificationError as e: msg = (_LE("Signature verification failed for image %(id)s: %(e)s") % {'id': image_id, 'e': encodeutils.exception_to_unicode(e)}) LOG.error(msg) self._delete(image_repo, image) raise webob.exc.HTTPBadRequest(explanation=msg) except webob.exc.HTTPGone as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload 
image data due to HTTP error")) except webob.exc.HTTPError as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to upload image data due to HTTP error")) self._restore(image_repo, image) except Exception as e: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Failed to upload image data due to " "internal error")) self._restore(image_repo, image) def download(self, req, image_id): image_repo = self.gateway.get_repo(req.context) try: image = image_repo.get(image_id) if image.status == 'deactivated' and not req.context.is_admin: msg = _('The requested image has been deactivated. ' 'Image data download is forbidden.') raise exception.Forbidden(message=msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to download image '%s'", image_id) raise webob.exc.HTTPForbidden(explanation=e.msg) return image class RequestDeserializer(wsgi.JSONRequestDeserializer): def upload(self, request): try: request.get_content_type(('application/octet-stream',)) except exception.InvalidContentType as e: raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg) image_size = request.content_length or None return {'size': image_size, 'data': request.body_file} class ResponseSerializer(wsgi.JSONResponseSerializer): def download(self, response, image): offset, chunk_size = 0, None range_val = response.request.get_content_range() if range_val: # NOTE(flaper87): if not present, both, start # and stop, will be None. if range_val.start is not None: offset = range_val.start if range_val.stop is not None: chunk_size = range_val.stop - offset response.headers['Content-Type'] = 'application/octet-stream' try: # NOTE(markwash): filesystem store (and maybe others?) 
cause a # problem with the caching middleware if they are not wrapped in # an iterator very strange response.app_iter = iter(image.get_data(offset=offset, chunk_size=chunk_size)) except glance_store.NotFound as e: raise webob.exc.HTTPNoContent(explanation=e.msg) except glance_store.RemoteServiceUnavailable as e: raise webob.exc.HTTPServiceUnavailable(explanation=e.msg) except (glance_store.StoreGetNotSupported, glance_store.StoreRandomGetNotSupported) as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to download image '%s'", image) raise webob.exc.HTTPForbidden(explanation=e.msg) # NOTE(saschpe): "response.app_iter = ..." currently resets Content-MD5 # (https://github.com/Pylons/webob/issues/86), so it should be set # afterwards for the time being. if image.checksum: response.headers['Content-MD5'] = image.checksum # NOTE(markwash): "response.app_iter = ..." also erroneously resets the # content-length response.headers['Content-Length'] = str(image.size) def upload(self, response, result): response.status_int = 204 def create_resource(): """Image data resource factory method""" deserializer = RequestDeserializer() serializer = ResponseSerializer() controller = ImageDataController() return wsgi.Resource(controller, deserializer, serializer) glance-12.0.0/glance/api/v1/0000775000567000056710000000000012701407204016511 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/v1/upload_utils.py0000664000567000056710000003050112701407047021573 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glance_store as store_api from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils import webob.exc from glance.common import exception from glance.common import store_utils from glance.common import utils import glance.db from glance.i18n import _, _LE, _LI import glance.registry.client.v1.api as registry CONF = cfg.CONF LOG = logging.getLogger(__name__) def initiate_deletion(req, location_data, id): """ Deletes image data from the location of backend store. :param req: The WSGI/Webob Request object :param location_data: Location to the image data in a data store :param id: Opaque image identifier """ store_utils.delete_image_location_from_backend(req.context, id, location_data) def _kill(req, image_id, from_state): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param from_state: Permitted current status for transition to 'killed' """ # TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html # needs updating to reflect the fact that queued->killed and saving->killed # are both allowed. registry.update_image_metadata(req.context, image_id, {'status': 'killed'}, from_state=from_state) def safe_kill(req, image_id, from_state): """ Mark image killed without raising exceptions if it fails. Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. 
:param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param from_state: Permitted current status for transition to 'killed' """ try: _kill(req, image_id, from_state) except Exception: LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id}) def upload_data_to_store(req, image_meta, image_data, store, notifier): """ Upload image data to specified store. Upload image data to the store and cleans up on error. """ image_id = image_meta['id'] db_api = glance.db.get_api() image_size = image_meta.get('size') try: # By default image_data will be passed as CooperativeReader object. # But if 'user_storage_quota' is enabled and 'remaining' is not None # then it will be passed as object of LimitingReader to # 'store_add_to_backend' method. image_data = utils.CooperativeReader(image_data) remaining = glance.api.common.check_quota( req.context, image_size, db_api, image_id=image_id) if remaining is not None: image_data = utils.LimitingReader(image_data, remaining) (uri, size, checksum, location_metadata) = store_api.store_add_to_backend( image_meta['id'], image_data, image_meta['size'], store, context=req.context) location_data = {'url': uri, 'metadata': location_metadata, 'status': 'active'} try: # recheck the quota in case there were simultaneous uploads that # did not provide the size glance.api.common.check_quota( req.context, size, db_api, image_id=image_id) except exception.StorageQuotaFull: with excutils.save_and_reraise_exception(): LOG.info(_LI('Cleaning up %s after exceeding ' 'the quota'), image_id) store_utils.safe_delete_from_backend( req.context, image_meta['id'], location_data) def _kill_mismatched(image_meta, attr, actual): supplied = image_meta.get(attr) if supplied and supplied != actual: msg = (_("Supplied %(attr)s (%(supplied)s) and " "%(attr)s generated from uploaded image " "(%(actual)s) did not match. 
Setting image " "status to 'killed'.") % {'attr': attr, 'supplied': supplied, 'actual': actual}) LOG.error(msg) safe_kill(req, image_id, 'saving') initiate_deletion(req, location_data, image_id) raise webob.exc.HTTPBadRequest(explanation=msg, content_type="text/plain", request=req) # Verify any supplied size/checksum value matches size/checksum # returned from store when adding image _kill_mismatched(image_meta, 'size', size) _kill_mismatched(image_meta, 'checksum', checksum) # Update the database with the checksum returned # from the backend store LOG.debug("Updating image %(image_id)s data. " "Checksum set to %(checksum)s, size set " "to %(size)d", {'image_id': image_id, 'checksum': checksum, 'size': size}) update_data = {'checksum': checksum, 'size': size} try: try: state = 'saving' image_meta = registry.update_image_metadata(req.context, image_id, update_data, from_state=state) except exception.Duplicate: image = registry.get_image_metadata(req.context, image_id) if image['status'] == 'deleted': raise exception.ImageNotFound() else: raise except exception.NotAuthenticated as e: # Delete image data due to possible token expiration. LOG.debug("Authentication error - the token may have " "expired during file upload. Deleting image data for " " %s " % image_id) initiate_deletion(req, location_data, image_id) raise webob.exc.HTTPUnauthorized(explanation=e.msg, request=req) except exception.ImageNotFound: msg = _("Image %s could not be found after upload. The image may" " have been deleted during the upload.") % image_id LOG.info(msg) # NOTE(jculp): we need to clean up the datastore if an image # resource is deleted while the image data is being uploaded # # We get "location_data" from above call to store.add(), any # exceptions that occur there handle this same issue internally, # Since this is store-agnostic, should apply to all stores. 
initiate_deletion(req, location_data, image_id) raise webob.exc.HTTPPreconditionFailed(explanation=msg, request=req, content_type='text/plain') except store_api.StoreAddDisabled: msg = _("Error in store configuration. Adding images to store " "is disabled.") LOG.exception(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPGone(explanation=msg, request=req, content_type='text/plain') except (store_api.Duplicate, exception.Duplicate) as e: msg = (_("Attempt to upload duplicate image: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) # NOTE(dosaboy): do not delete the image since it is likely that this # conflict is a result of another concurrent upload that will be # successful. notifier.error('image.upload', msg) raise webob.exc.HTTPConflict(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden as e: msg = (_("Forbidden upload attempt: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except store_api.StorageFull as e: msg = (_("Image storage media is full: %s") % encodeutils.exception_to_unicode(e)) LOG.error(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except store_api.StorageWriteDenied as e: msg = (_("Insufficient permissions on image storage media: %s") % encodeutils.exception_to_unicode(e)) LOG.error(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPServiceUnavailable(explanation=msg, request=req, content_type='text/plain') except exception.ImageSizeLimitExceeded as e: msg = (_("Denying attempt to upload image larger than %d bytes.") % CONF.image_size_cap) LOG.warn(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) 
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except exception.StorageQuotaFull as e: msg = (_("Denying attempt to upload image because it exceeds the " "quota: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except webob.exc.HTTPError: # NOTE(bcwaldon): Ideally, we would just call 'raise' here, # but something in the above function calls is affecting the # exception context and we must explicitly re-raise the # caught exception. msg = _LE("Received HTTP error while uploading image %s") % image_id notifier.error('image.upload', msg) with excutils.save_and_reraise_exception(): LOG.exception(msg) safe_kill(req, image_id, 'saving') except (ValueError, IOError) as e: msg = _("Client disconnected before sending all data to backend") LOG.warn(msg) safe_kill(req, image_id, 'saving') raise webob.exc.HTTPBadRequest(explanation=msg, content_type="text/plain", request=req) except Exception as e: msg = _("Failed to upload image %s") % image_id LOG.exception(msg) safe_kill(req, image_id, 'saving') notifier.error('image.upload', msg) raise webob.exc.HTTPInternalServerError(explanation=msg, request=req, content_type='text/plain') return image_meta, location_data glance-12.0.0/glance/api/v1/filters.py0000664000567000056710000000255312701407047020545 0ustar jenkinsjenkins00000000000000# Copyright 2012, Piston Cloud Computing, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def validate(filter, value): return FILTER_FUNCTIONS.get(filter, lambda v: True)(value) def validate_int_in_range(min=0, max=None): def _validator(v): try: if max is None: return min <= int(v) return min <= int(v) <= max except ValueError: return False return _validator def validate_boolean(v): return v.lower() in ('none', 'true', 'false', '1', '0') FILTER_FUNCTIONS = {'size_max': validate_int_in_range(), # build validator 'size_min': validate_int_in_range(), # build validator 'min_ram': validate_int_in_range(), # build validator 'protected': validate_boolean, 'is_public': validate_boolean, } glance-12.0.0/glance/api/v1/members.py0000664000567000056710000002221612701407051020520 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation. # Copyright 2013 NTT corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import webob.exc from glance.api import policy from glance.api.v1 import controller from glance.common import exception from glance.common import utils from glance.common import wsgi from glance.i18n import _ import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('image_member_quota', 'glance.common.config') class Controller(controller.BaseController): def __init__(self): self.policy = policy.Enforcer() def _check_can_access_image_members(self, context): if context.owner is None and not context.is_admin: raise webob.exc.HTTPUnauthorized(_("No authenticated user")) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: LOG.debug("User not permitted to perform '%s' action", action) raise webob.exc.HTTPForbidden() def _raise_404_if_image_deleted(self, req, image_id): image = self.get_image_meta_or_404(req, image_id) if image['status'] == 'deleted': msg = _("Image with identifier %s has been deleted.") % image_id raise webob.exc.HTTPNotFound(msg) def index(self, req, image_id): """ Return a list of dictionaries indicating the members of the image, i.e., those tenants the image is shared with. :param req: the Request object coming from the wsgi layer :param image_id: The opaque image identifier :retval The response body is a mapping of the following form:: {'members': [ {'member_id': , 'can_share': , ...}, ... 
]} """ self._enforce(req, 'get_members') self._raise_404_if_image_deleted(req, image_id) try: members = registry.get_image_members(req.context, image_id) except exception.NotFound: msg = _("Image with identifier %s not found") % image_id LOG.warn(msg) raise webob.exc.HTTPNotFound(msg) except exception.Forbidden: msg = _("Unauthorized image access") LOG.warn(msg) raise webob.exc.HTTPForbidden(msg) return dict(members=members) @utils.mutating def delete(self, req, image_id, id): """ Removes a membership from the image. """ self._check_can_access_image_members(req.context) self._enforce(req, 'delete_member') self._raise_404_if_image_deleted(req, image_id) try: registry.delete_member(req.context, image_id, id) self._update_store_acls(req, image_id) except exception.NotFound as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug("User not permitted to remove membership from image " "'%s'", image_id) raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() def default(self, req, image_id, id, body=None): """This will cover the missing 'show' and 'create' actions""" raise webob.exc.HTTPMethodNotAllowed() def _enforce_image_member_quota(self, req, attempted): if CONF.image_member_quota < 0: # If value is negative, allow unlimited number of members return maximum = CONF.image_member_quota if attempted > maximum: msg = _("The limit has been exceeded on the number of allowed " "image members for this image. Attempted: %(attempted)s, " "Maximum: %(maximum)s") % {'attempted': attempted, 'maximum': maximum} raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, request=req) @utils.mutating def update(self, req, image_id, id, body=None): """ Adds a membership to the image, or updates an existing one. 
If a body is present, it is a dict with the following format:: {"member": { "can_share": [True|False] }} If "can_share" is provided, the member's ability to share is set accordingly. If it is not provided, existing memberships remain unchanged and new memberships default to False. """ self._check_can_access_image_members(req.context) self._enforce(req, 'modify_member') self._raise_404_if_image_deleted(req, image_id) new_number_of_members = len(registry.get_image_members(req.context, image_id)) + 1 self._enforce_image_member_quota(req, new_number_of_members) # Figure out can_share can_share = None if body and 'member' in body and 'can_share' in body['member']: can_share = bool(body['member']['can_share']) try: registry.add_member(req.context, image_id, id, can_share) self._update_store_acls(req, image_id) except exception.Invalid as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotFound as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() @utils.mutating def update_all(self, req, image_id, body): """ Replaces the members of the image with those specified in the body. The body is a dict with the following format:: {"memberships": [ {"member_id": , ["can_share": [True|False]]}, ... 
]} """ self._check_can_access_image_members(req.context) self._enforce(req, 'modify_member') self._raise_404_if_image_deleted(req, image_id) memberships = body.get('memberships') if memberships: new_number_of_members = len(body['memberships']) self._enforce_image_member_quota(req, new_number_of_members) try: registry.replace_members(req.context, image_id, body) self._update_store_acls(req, image_id) except exception.Invalid as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPBadRequest(explanation=e.msg) except exception.NotFound as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) return webob.exc.HTTPNoContent() def index_shared_images(self, req, id): """ Retrieves list of image memberships for the given member. :param req: the Request object coming from the wsgi layer :param id: the opaque member identifier :retval The response body is a mapping of the following form:: {'shared_images': [ {'image_id': , 'can_share': , ...}, ... 
]} """ try: members = registry.get_member_images(req.context, id) except exception.NotFound as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Forbidden as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPForbidden(explanation=e.msg) return dict(shared_images=members) def _update_store_acls(self, req, image_id): image_meta = self.get_image_meta_or_404(req, image_id) location_uri = image_meta.get('location') public = image_meta.get('is_public') self.update_store_acls(req, image_id, location_uri, public) def create_resource(): """Image members resource factory method""" deserializer = wsgi.JSONRequestDeserializer() serializer = wsgi.JSONResponseSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/api/v1/__init__.py0000664000567000056710000000214112701407047020625 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', 'min_ram', 'min_disk', 'size_min', 'size_max', 'is_public', 'changes-since', 'protected'] SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') # Metadata which only an admin can change once the image is active ACTIVE_IMMUTABLE = ('size', 'checksum') # Metadata which cannot be changed (irrespective of the current image state) IMMUTABLE = ('status', 'id') glance-12.0.0/glance/api/v1/controller.py0000664000567000056710000001013212701407047021250 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import glance_store as store from oslo_log import log as logging import webob.exc from glance.common import exception from glance.i18n import _ import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) class BaseController(object): def get_image_meta_or_404(self, request, image_id): """ Grabs the image metadata for an image with a supplied identifier or raises an HTTPNotFound (404) response :param request: The WSGI/Webob Request object :param image_id: The opaque image identifier :raises: HTTPNotFound if image does not exist """ context = request.context try: return registry.get_image_metadata(context, image_id) except exception.NotFound: LOG.debug("Image with identifier %s not found", image_id) msg = _("Image with identifier %s not found") % image_id raise webob.exc.HTTPNotFound( msg, request=request, content_type='text/plain') except exception.Forbidden: LOG.debug("Forbidden image access") raise webob.exc.HTTPForbidden(_("Forbidden image access"), request=request, content_type='text/plain') def get_active_image_meta_or_error(self, request, image_id): """ Same as get_image_meta_or_404 except that it will raise a 403 if the image is deactivated or 404 if the image is otherwise not 'active'. 
""" image = self.get_image_meta_or_404(request, image_id) if image['status'] == 'deactivated': LOG.debug("Image %s is deactivated", image_id) msg = _("Image %s is deactivated") % image_id raise webob.exc.HTTPForbidden( msg, request=request, content_type='text/plain') if image['status'] != 'active': LOG.debug("Image %s is not active", image_id) msg = _("Image %s is not active") % image_id raise webob.exc.HTTPNotFound( msg, request=request, content_type='text/plain') return image def update_store_acls(self, req, image_id, location_uri, public=False): if location_uri: try: read_tenants = [] write_tenants = [] members = registry.get_image_members(req.context, image_id) if members: for member in members: if member['can_share']: write_tenants.append(member['member_id']) else: read_tenants.append(member['member_id']) store.set_acls(location_uri, public=public, read_tenants=read_tenants, write_tenants=write_tenants, context=req.context) except store.UnknownScheme: msg = _("Store for image_id not found: %s") % image_id raise webob.exc.HTTPBadRequest(explanation=msg, request=req, content_type='text/plain') except store.NotFound: msg = _("Data for image_id not found: %s") % image_id raise webob.exc.HTTPNotFound(explanation=msg, request=req, content_type='text/plain') glance-12.0.0/glance/api/v1/router.py0000664000567000056710000001106012701407047020406 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from glance.api.v1 import images from glance.api.v1 import members from glance.common import wsgi class API(wsgi.Router): """WSGI router for Glance v1 API requests.""" def __init__(self, mapper): reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) images_resource = images.create_resource() mapper.connect("/", controller=images_resource, action="index") mapper.connect("/images", controller=images_resource, action='index', conditions={'method': ['GET']}) mapper.connect("/images", controller=images_resource, action='create', conditions={'method': ['POST']}) mapper.connect("/images", controller=reject_method_resource, action='reject', allowed_methods='GET, POST') mapper.connect("/images/detail", controller=images_resource, action='detail', conditions={'method': ['GET', 'HEAD']}) mapper.connect("/images/detail", controller=reject_method_resource, action='reject', allowed_methods='GET, HEAD') mapper.connect("/images/{id}", controller=images_resource, action="meta", conditions=dict(method=["HEAD"])) mapper.connect("/images/{id}", controller=images_resource, action="show", conditions=dict(method=["GET"])) mapper.connect("/images/{id}", controller=images_resource, action="update", conditions=dict(method=["PUT"])) mapper.connect("/images/{id}", controller=images_resource, action="delete", conditions=dict(method=["DELETE"])) mapper.connect("/images/{id}", controller=reject_method_resource, action='reject', allowed_methods='GET, HEAD, PUT, DELETE') members_resource = members.create_resource() mapper.connect("/images/{image_id}/members", controller=members_resource, action="index", conditions={'method': ['GET']}) mapper.connect("/images/{image_id}/members", controller=members_resource, action="update_all", conditions=dict(method=["PUT"])) mapper.connect("/images/{image_id}/members", controller=reject_method_resource, action='reject', allowed_methods='GET, 
PUT') mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="show", conditions={'method': ['GET']}) mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="update", conditions={'method': ['PUT']}) mapper.connect("/images/{image_id}/members/{id}", controller=members_resource, action="delete", conditions={'method': ['DELETE']}) mapper.connect("/images/{image_id}/members/{id}", controller=reject_method_resource, action='reject', allowed_methods='GET, PUT, DELETE') mapper.connect("/shared-images/{id}", controller=members_resource, action="index_shared_images") super(API, self).__init__(mapper) glance-12.0.0/glance/api/v1/images.py0000664000567000056710000016020212701407051020331 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" /images endpoint for Glance v1 API """ import copy import glance_store as store import glance_store.location from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import strutils import six from webob.exc import HTTPBadRequest from webob.exc import HTTPConflict from webob.exc import HTTPForbidden from webob.exc import HTTPMethodNotAllowed from webob.exc import HTTPNotFound from webob.exc import HTTPRequestEntityTooLarge from webob.exc import HTTPServiceUnavailable from webob.exc import HTTPUnauthorized from webob import Response from glance.api import common from glance.api import policy import glance.api.v1 from glance.api.v1 import controller from glance.api.v1 import filters from glance.api.v1 import upload_utils from glance.common import exception from glance.common import property_utils from glance.common import store_utils from glance.common import timeutils from glance.common import utils from glance.common import wsgi from glance.i18n import _, _LE, _LI, _LW from glance import notifier import glance.registry.client.v1.api as registry LOG = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS ACTIVE_IMMUTABLE = glance.api.v1.ACTIVE_IMMUTABLE IMMUTABLE = glance.api.v1.IMMUTABLE CONF = cfg.CONF CONF.import_opt('disk_formats', 'glance.common.config', group='image_format') CONF.import_opt('container_formats', 'glance.common.config', group='image_format') CONF.import_opt('image_property_quota', 'glance.common.config') def _validate_time(req, values): """Validates time formats for updated_at, created_at and deleted_at. 'strftime' only allows values after 1900 in glance v1 so this is enforced here. This was introduced to keep modularity. 
""" for time_field in ['created_at', 'updated_at', 'deleted_at']: if time_field in values and values[time_field]: try: time = timeutils.parse_isotime(values[time_field]) # On Python 2, datetime.datetime.strftime() raises a ValueError # for years older than 1900. On Python 3, years older than 1900 # are accepted. But we explicitly want to reject timestamps # older than January 1st, 1900 for Glance API v1. if time.year < 1900: raise ValueError values[time_field] = time.strftime( timeutils.PERFECT_TIME_FORMAT) except ValueError: msg = (_("Invalid time format for %s.") % time_field) raise HTTPBadRequest(explanation=msg, request=req) def _validate_format(req, values): """Validates disk_format and container_format fields Introduced to split too complex validate_image_meta method. """ amazon_formats = ('aki', 'ari', 'ami') disk_format = values.get('disk_format') container_format = values.get('container_format') if 'disk_format' in values: if disk_format not in CONF.image_format.disk_formats: msg = _("Invalid disk format '%s' for image.") % disk_format raise HTTPBadRequest(explanation=msg, request=req) if 'container_format' in values: if container_format not in CONF.image_format.container_formats: msg = _("Invalid container format '%s' " "for image.") % container_format raise HTTPBadRequest(explanation=msg, request=req) if any(f in amazon_formats for f in [disk_format, container_format]): if disk_format is None: values['disk_format'] = container_format elif container_format is None: values['container_format'] = disk_format elif container_format != disk_format: msg = (_("Invalid mix of disk and container formats. 
" "When setting a disk or container format to " "one of 'aki', 'ari', or 'ami', the container " "and disk formats must match.")) raise HTTPBadRequest(explanation=msg, request=req) def validate_image_meta(req, values): _validate_format(req, values) _validate_time(req, values) name = values.get('name') checksum = values.get('checksum') if name and len(name) > 255: msg = _('Image name too long: %d') % len(name) raise HTTPBadRequest(explanation=msg, request=req) # check that checksum retrieved is exactly 32 characters # as long as we expect md5 checksum # https://bugs.launchpad.net/glance/+bug/1454730 if checksum and len(checksum) > 32: msg = (_("Invalid checksum '%s': can't exceed 32 characters") % checksum) raise HTTPBadRequest(explanation=msg, request=req) return values def redact_loc(image_meta, copy_dict=True): """ Create a shallow copy of image meta with 'location' removed for security (as it can contain credentials). """ if copy_dict: new_image_meta = copy.copy(image_meta) else: new_image_meta = image_meta new_image_meta.pop('location', None) new_image_meta.pop('location_data', None) return new_image_meta class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. 
The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/ -- Return metadata about an image with id GET /images/ -- Return image data for image with id POST /images -- Store image data and return metadata about the newly-stored image PUT /images/ -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/ -- Delete the image with id """ def __init__(self): self.notifier = notifier.Notifier() registry.configure_registry_client() self.policy = policy.Enforcer() if property_utils.is_property_protection_enabled(): self.prop_enforcer = property_utils.PropertyRules(self.policy) else: self.prop_enforcer = None def _enforce(self, req, action, target=None): """Authorize an action against our policies""" if target is None: target = {} try: self.policy.enforce(req.context, action, target) except exception.Forbidden: LOG.debug("User not permitted to perform '%s' action", action) raise HTTPForbidden() def _enforce_image_property_quota(self, image_meta, orig_image_meta=None, purge_props=False, req=None): if CONF.image_property_quota < 0: # If value is negative, allow unlimited number of properties return props = list(image_meta['properties'].keys()) # NOTE(ameade): If we are not removing existing properties, # take them in to account if (not purge_props) and orig_image_meta: original_props = orig_image_meta['properties'].keys() props.extend(original_props) props = set(props) if len(props) > CONF.image_property_quota: msg = (_("The limit has been exceeded on the number of allowed " "image properties. 
Attempted: %(num)s, Maximum: " "%(quota)s") % {'num': len(props), 'quota': CONF.image_property_quota}) LOG.warn(msg) raise HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type="text/plain") def _enforce_create_protected_props(self, create_props, req): """ Check request is permitted to create certain properties :param create_props: List of properties to check :param req: The WSGI/Webob Request object :raises: HTTPForbidden if request forbidden to create a property """ if property_utils.is_property_protection_enabled(): for key in create_props: if (self.prop_enforcer.check_property_rules( key, 'create', req.context) is False): msg = _("Property '%s' is protected") % key LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _enforce_read_protected_props(self, image_meta, req): """ Remove entries from metadata properties if they are read protected :param image_meta: Mapping of metadata about image :param req: The WSGI/Webob Request object """ if property_utils.is_property_protection_enabled(): for key in list(image_meta['properties'].keys()): if (self.prop_enforcer.check_property_rules( key, 'read', req.context) is False): image_meta['properties'].pop(key) def _enforce_update_protected_props(self, update_props, image_meta, orig_meta, req): """ Check request is permitted to update certain properties. Read permission is required to delete a property. If the property value is unchanged, i.e. a noop, it is permitted, however, it is important to ensure read access first. Otherwise the value could be discovered using brute force. 
:param update_props: List of properties to check :param image_meta: Mapping of proposed new metadata about image :param orig_meta: Mapping of existing metadata about image :param req: The WSGI/Webob Request object :raises: HTTPForbidden if request forbidden to create a property """ if property_utils.is_property_protection_enabled(): for key in update_props: has_read = self.prop_enforcer.check_property_rules( key, 'read', req.context) if ((self.prop_enforcer.check_property_rules( key, 'update', req.context) is False and image_meta['properties'][key] != orig_meta['properties'][key]) or not has_read): msg = _("Property '%s' is protected") % key LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _enforce_delete_protected_props(self, delete_props, image_meta, orig_meta, req): """ Check request is permitted to delete certain properties. Read permission is required to delete a property. Note, the absence of a property in a request does not necessarily indicate a delete. The requester may not have read access, and so can not know the property exists. Hence, read access is a requirement for delete, otherwise the delete is ignored transparently. 
:param delete_props: List of properties to check :param image_meta: Mapping of proposed new metadata about image :param orig_meta: Mapping of existing metadata about image :param req: The WSGI/Webob Request object :raises: HTTPForbidden if request forbidden to create a property """ if property_utils.is_property_protection_enabled(): for key in delete_props: if (self.prop_enforcer.check_property_rules( key, 'read', req.context) is False): # NOTE(bourke): if read protected, re-add to image_meta to # prevent deletion image_meta['properties'][key] = orig_meta[ 'properties'][key] elif (self.prop_enforcer.check_property_rules( key, 'delete', req.context) is False): msg = _("Property '%s' is protected") % key LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': , 'name': , 'disk_format': , 'container_format': , 'checksum': 'size': }, ... 
]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) return dict(images=images) def detail(self, req): """ Returns detailed information for all available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': , 'name': , 'size': , 'disk_format': , 'container_format': , 'checksum': , 'min_disk': , 'min_ram': , 'store': , 'status': , 'created_at': , 'updated_at': , 'deleted_at': |, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ if req.method == 'HEAD': msg = (_("This operation is currently not permitted on " "Glance images details.")) raise HTTPMethodNotAllowed(explanation=msg, headers={'Allow': 'GET'}, body_template='${explanation}') self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: redact_loc(image, copy_dict=False) self._enforce_read_protected_props(image, req) except exception.Invalid as e: raise HTTPBadRequest(explanation=e.msg, request=req) except exception.NotAuthenticated as e: raise HTTPUnauthorized(explanation=e.msg, request=req) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) # Fix for LP Bug #1132294 # Ensure all shared images are returned in v1 params['member_status'] = 'all' return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest(_('Bad value passed to filter ' '%(filter)s got %(val)s') % {'filter': param, 'val': query_filters[param]}) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises: HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) image_meta = redact_loc(image_meta) self._enforce_read_protected_props(image_meta, req) return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ Validate if external sources (as specified via the location or copy-from headers) are supported. Otherwise we reject with 400 "Bad Request". 
""" if store_utils.validate_external_location(source): return source else: if source: msg = _("External sources are not supported: '%s'") % source else: msg = _("External source should not be empty") LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') def _external_source(self, image_meta, req): if 'location' in image_meta: self._enforce(req, 'set_image_location') source = image_meta['location'] elif 'x-glance-api-copy-from' in req.headers: source = Controller._copy_from(req) else: # we have an empty external source value # so we are creating "draft" of the image and no need validation return None return Controller._validate_source(source, req) @staticmethod def _get_from_store(context, where, dest=None): try: loc = glance_store.location.get_location_from_uri(where) src_store = store.get_store_from_uri(where) if dest is not None: src_store.READ_CHUNKSIZE = dest.WRITE_CHUNKSIZE image_data, image_size = src_store.get(loc, context=context) except store.RemoteServiceUnavailable as e: raise HTTPServiceUnavailable(explanation=e.msg) except store.NotFound as e: raise HTTPNotFound(explanation=e.msg) except (store.StoreGetNotSupported, store.StoreRandomGetNotSupported, store.UnknownScheme) as e: raise HTTPBadRequest(explanation=e.msg) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises: HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') try: image_meta = self.get_active_image_meta_or_error(req, id) except HTTPNotFound: # provision for backward-compatibility breaking issue # catch the 404 exception and raise it after enforcing # the policy with excutils.save_and_reraise_exception(): self._enforce(req, 'download_image') else: target = utils.create_mashup_dict(image_meta) self._enforce(req, 'download_image', target=target) self._enforce_read_protected_props(image_meta, req) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(req.context, image_meta['location']) image_iterator = utils.cooperative_iter(image_iterator) image_meta['size'] = size or image_meta['size'] image_meta = redact_loc(image_meta) return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises: HTTPConflict if image already exists :raises: HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) scheme = image_meta.get('store') if scheme and scheme not in store.get_known_schemes(): msg = _("Required store %s is invalid") % scheme LOG.warn(msg) raise HTTPBadRequest(explanation=msg, content_type='text/plain') image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: try: backend = store.get_store_from_location(location) except (store.UnknownScheme, store.BadStoreUri): LOG.debug("Invalid location %s", location) msg = _("Invalid location %s") % location raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, backend) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(req.context, image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) self.notifier.info("image.create", redact_loc(image_meta)) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") except exception.Invalid as e: msg = (_("Failed to reserve image. 
Got error: %s") % encodeutils.exception_to_unicode(e)) LOG.exception(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that scheme; if not, Glance will use the scheme set by the flag `default_store` to find the backing store. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises: HTTPConflict if image already exists :retval The location where the image was stored """ scheme = req.headers.get('x-image-meta-store', CONF.glance_store.default_store) store = self.get_store_or_400(req, scheme) copy_from = self._copy_from(req) if copy_from: try: image_data, image_size = self._get_from_store(req.context, copy_from, dest=store) except Exception: upload_utils.safe_kill(req, image_meta['id'], 'queued') msg = (_LE("Copy from external source '%(scheme)s' failed for " "image: %(image)s") % {'scheme': scheme, 'image': image_meta['id']}) LOG.exception(msg) return image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type(('application/octet-stream',)) except exception.InvalidContentType: upload_utils.safe_kill(req, image_meta['id'], 'queued') msg = _("Content-Type must be application/octet-stream") LOG.warn(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file image_id = image_meta['id'] LOG.debug("Setting image %s to status 'saving'", image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) LOG.debug("Uploading image data for image %(image_id)s " "to %(scheme)s store", {'image_id': image_id, 'scheme': scheme}) self.notifier.info("image.prepare", redact_loc(image_meta)) image_meta, location_data = 
upload_utils.upload_data_to_store( req, image_meta, image_data, store, self.notifier) self.notifier.info('image.upload', redact_loc(image_meta)) return location_data def _activate(self, req, image_id, location_data, from_state=None): """ Sets the image status to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location_data: Location of where Glance stored this image """ image_meta = { 'location': location_data['url'], 'status': 'active', 'location_data': [location_data] } try: s = from_state image_meta_data = registry.update_image_metadata(req.context, image_id, image_meta, from_state=s) self.notifier.info("image.activate", redact_loc(image_meta_data)) self.notifier.info("image.update", redact_loc(image_meta_data)) return image_meta_data except exception.Duplicate: with excutils.save_and_reraise_exception(): # Delete image data since it has been superseded by another # upload and re-raise. LOG.debug("duplicate operation - deleting image data for " " %(id)s (location:%(location)s)", {'id': image_id, 'location': image_meta['location']}) upload_utils.initiate_deletion(req, location_data, image_id) except exception.Invalid as e: msg = (_("Failed to activate image. Got error: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ location_data = self._upload(req, image_meta) image_id = image_meta['id'] LOG.info(_LI("Uploaded data of image %s from request " "payload successfully."), image_id) if location_data: try: image_meta = self._activate(req, image_id, location_data, from_state='saving') except exception.Duplicate: raise except Exception: with excutils.save_and_reraise_exception(): # NOTE(zhiyan): Delete image data since it has already # been added to store by above _upload() call. LOG.warn(_LW("Failed to activate image %s in " "registry. About to delete image " "bits from store and update status " "to 'killed'.") % image_id) upload_utils.initiate_deletion(req, location_data, image_id) upload_utils.safe_kill(req, image_id, 'saving') else: image_meta = None return image_meta def _get_size(self, context, image_meta, location): # retrieve the image size from remote store (if not provided) try: return (image_meta.get('size', 0) or store.get_size_from_backend(location, context=context)) except store.NotFound as e: # NOTE(rajesht): The exception is logged as debug message because # the image is located at third-party server and it has nothing to # do with glance. If log.exception is used here, in that case the # log file might be flooded with exception log messages if # malicious user keeps on trying image-create using non-existent # location url. Used log.debug because administrator can # disable debug logs. 
LOG.debug(encodeutils.exception_to_unicode(e)) raise HTTPNotFound(explanation=e.msg, content_type="text/plain") except (store.UnknownScheme, store.BadStoreUri) as e: # NOTE(rajesht): See above note of store.NotFound LOG.debug(encodeutils.exception_to_unicode(e)) raise HTTPBadRequest(explanation=e.msg, content_type="text/plain") def _handle_source(self, req, image_id, image_meta, image_data): copy_from = self._copy_from(req) location = image_meta.get('location') sources = [obj for obj in (copy_from, location, image_data) if obj] if len(sources) >= 2: msg = _("It's invalid to provide multiple image sources.") LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") if len(sources) == 0: return image_meta if image_data: image_meta = self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._upload_and_activate(req, image_meta) elif copy_from: msg = _LI('Triggering asynchronous copy from external source') LOG.info(msg) pool = common.get_thread_pool("copy_from_eventlet_pool") pool.spawn_n(self._upload_and_activate, req, image_meta) else: if location: self._validate_image_for_activation(req, image_id, image_meta) image_size_meta = image_meta.get('size') if image_size_meta: try: image_size_store = store.get_size_from_backend( location, req.context) except (store.BadStoreUri, store.UnknownScheme) as e: LOG.debug(encodeutils.exception_to_unicode(e)) raise HTTPBadRequest(explanation=e.msg, request=req, content_type="text/plain") # NOTE(zhiyan): A returned size of zero usually means # the driver encountered an error. In this case the # size provided by the client will be used as-is. if (image_size_store and image_size_store != image_size_meta): msg = (_("Provided image size must match the stored" " image size. 
(provided size: %(ps)d, " "stored size: %(ss)d)") % {"ps": image_size_meta, "ss": image_size_store}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") location_data = {'url': location, 'metadata': {}, 'status': 'active'} image_meta = self._activate(req, image_id, location_data) return image_meta def _validate_image_for_activation(self, req, id, values): """Ensures that all required image metadata values are valid.""" image = self.get_image_meta_or_404(req, id) if values['disk_format'] is None: if not image['disk_format']: msg = _("Disk format is not specified.") raise HTTPBadRequest(explanation=msg, request=req) values['disk_format'] = image['disk_format'] if values['container_format'] is None: if not image['container_format']: msg = _("Container format is not specified.") raise HTTPBadRequest(explanation=msg, request=req) values['container_format'] = image['container_format'] if 'name' not in values: values['name'] = image['name'] values = validate_image_meta(req, values) return values @utils.mutating def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. 
The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises: HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. """ self._enforce(req, 'add_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') if Controller._copy_from(req): self._enforce(req, 'copy_from') if image_data or Controller._copy_from(req): self._enforce(req, 'upload_image') self._enforce_create_protected_props(image_meta['properties'].keys(), req) self._enforce_image_property_quota(image_meta, req=req) image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) location_uri = image_meta.get('location') if location_uri: self.update_store_acls(req, id, location_uri, public=is_public) # Prevent client from learning the location, as it # could contain security credentials image_meta = redact_loc(image_meta) return {'image_meta': image_meta} @utils.mutating def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. 
:param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') if Controller._copy_from(req): self._enforce(req, 'copy_from') if image_data or Controller._copy_from(req): self._enforce(req, 'upload_image') orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # Do not allow any updates on a deleted image. # Fix for LP Bug #1060930 if orig_status == 'deleted': msg = _("Forbidden to update deleted image.") raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") if req.context.is_admin is False: # Once an image is 'active' only an admin can # modify certain core metadata keys for key in ACTIVE_IMMUTABLE: if ((orig_status == 'active' or orig_status == 'deactivated') and key in image_meta and image_meta.get(key) != orig_image_meta.get(key)): msg = _("Forbidden to modify '%(key)s' of %(status)s " "image.") % {'key': key, 'status': orig_status} raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") for key in IMMUTABLE: if (key in image_meta and image_meta.get(key) != orig_image_meta.get(key)): msg = _("Forbidden to modify '%s' of image.") % key raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") # The default behaviour for a PUT /images/ is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... 
purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (strutils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) # Make image public in the backend store (if implemented) orig_or_updated_loc = location or orig_image_meta.get('location') if orig_or_updated_loc: try: self.update_store_acls(req, id, orig_or_updated_loc, public=is_public) except store.BadStoreUri: msg = _("Invalid location: %s") % location LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") # ensure requester has permissions to create/update/delete properties # according to property-protections.conf orig_keys = set(orig_image_meta['properties']) new_keys = set(image_meta['properties']) self._enforce_update_protected_props( orig_keys.intersection(new_keys), image_meta, orig_image_meta, req) self._enforce_create_protected_props( new_keys.difference(orig_keys), req) if purge_props: self._enforce_delete_protected_props( orig_keys.difference(new_keys), image_meta, orig_image_meta, req) self._enforce_image_property_quota(image_meta, orig_image_meta=orig_image_meta, purge_props=purge_props, req=req) try: if location: image_meta['size'] = self._get_size(req.context, image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, 
purge_props) if activating: image_meta = self._handle_source(req, id, image_meta, image_data) except exception.Invalid as e: msg = (_("Failed to update image metadata. Got error: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.ImageNotFound as e: msg = (_("Failed to find image to update: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden as e: msg = (_("Forbidden to update image: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except (exception.Conflict, exception.Duplicate) as e: LOG.warn(encodeutils.exception_to_unicode(e)) raise HTTPConflict(body=_('Image operation conflicts'), request=req, content_type='text/plain') else: self.notifier.info('image.update', redact_loc(image_meta)) # Prevent client from learning the location, as it # could contain security credentials image_meta = redact_loc(image_meta) self._enforce_read_protected_props(image_meta, req) return {'image_meta': image_meta} @utils.mutating def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises: HttpBadRequest if image registry is invalid :raises: HttpNotFound if image or any chunk is not available :raises: HttpUnauthorized if image or any chunk is not deleteable by the requesting user """ self._enforce(req, 'delete_image') image = self.get_image_meta_or_404(req, id) if image['protected']: msg = _("Image is protected") LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") if image['status'] == 'pending_delete': msg = (_("Forbidden to delete a %s image.") % image['status']) LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, 
content_type="text/plain") elif image['status'] == 'deleted': msg = _("Image %s not found.") % id LOG.warn(msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") if image['location'] and CONF.delayed_delete: status = 'pending_delete' else: status = 'deleted' ori_status = image['status'] try: # Update the image from the registry first, since we rely on it # for authorization checks. # See https://bugs.launchpad.net/glance/+bug/1065187 image = registry.update_image_metadata(req.context, id, {'status': status}) try: # The image's location field may be None in the case # of a saving or queued image, therefore don't ask a backend # to delete the image if the backend doesn't yet store it. # See https://bugs.launchpad.net/glance/+bug/747799 if image['location']: for loc_data in image['location_data']: if loc_data['status'] == 'active': upload_utils.initiate_deletion(req, loc_data, id) except Exception: with excutils.save_and_reraise_exception(): registry.update_image_metadata(req.context, id, {'status': ori_status}) registry.delete_image_metadata(req.context, id) except exception.ImageNotFound as e: msg = (_("Failed to find image to delete: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden as e: msg = (_("Forbidden to delete image: %s") % encodeutils.exception_to_unicode(e)) LOG.warn(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except store.InUseByStore as e: msg = (_("Image %(id)s could not be deleted because it is in use: " "%(exc)s") % {"id": id, "exc": encodeutils.exception_to_unicode(e)}) LOG.warn(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.delete', redact_loc(image)) return Response(body='', status=200) def get_store_or_400(self, request, scheme): """ Grabs the storage backend for the supplied store name or raises an 
HTTPBadRequest (400) response :param request: The WSGI/Webob Request object :param scheme: The backend store scheme :raises: HTTPBadRequest if store does not exist """ try: return store.get_store_from_scheme(scheme) except store.UnknownScheme: msg = _("Store for scheme %s not found") % scheme LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=request, content_type='text/plain') class ImageDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" def _deserialize(self, request): result = {} try: result['image_meta'] = utils.get_image_meta_from_headers(request) except exception.InvalidParameterValue as e: msg = encodeutils.exception_to_unicode(e) LOG.warn(msg, exc_info=True) raise HTTPBadRequest(explanation=e.msg, request=request) image_meta = result['image_meta'] image_meta = validate_image_meta(request, image_meta) if request.content_length: image_size = request.content_length elif 'size' in image_meta: image_size = image_meta['size'] else: image_size = None data = request.body_file if self.has_body(request) else None if image_size is None and data is not None: data = utils.LimitingReader(data, CONF.image_size_cap) # NOTE(bcwaldon): this is a hack to make sure the downstream code # gets the correct image data request.body_file = data elif image_size is not None and image_size > CONF.image_size_cap: max_image_size = CONF.image_size_cap msg = (_("Denying attempt to upload image larger than %d" " bytes.") % max_image_size) LOG.warn(msg) raise HTTPBadRequest(explanation=msg, request=request) result['image_data'] = data return result def create(self, request): return self._deserialize(request) def update(self, request): return self._deserialize(request) class ImageSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" def __init__(self): self.notifier = notifier.Notifier() def _inject_location_header(self, response, image_meta): location = 
self._get_image_location(image_meta) if six.PY2: location = location.encode('utf-8') response.headers['Location'] = location def _inject_checksum_header(self, response, image_meta): if image_meta['checksum'] is not None: checksum = image_meta['checksum'] if six.PY2: checksum = checksum.encode('utf-8') response.headers['ETag'] = checksum def _inject_image_meta_headers(self, response, image_meta): """ Given a response and mapping of image metadata, injects the Response with a set of HTTP headers for the image metadata. Each main image metadata field is injected as a HTTP header with key 'x-image-meta-' except for the properties field, which is further broken out into a set of 'x-image-meta-property-' headers :param response: The Webob Response object :param image_meta: Mapping of image metadata """ headers = utils.image_meta_to_http_headers(image_meta) for k, v in headers.items(): if six.PY3: response.headers[str(k)] = str(v) else: response.headers[k.encode('utf-8')] = v.encode('utf-8') def _get_image_location(self, image_meta): """Build a relative url to reach the image defined by image_meta.""" return "/v1/images/%s" % image_meta['id'] def meta(self, response, result): image_meta = result['image_meta'] self._inject_image_meta_headers(response, image_meta) self._inject_checksum_header(response, image_meta) return response def show(self, response, result): image_meta = result['image_meta'] image_iter = result['image_iterator'] # image_meta['size'] should be an int, but could possibly be a str expected_size = int(image_meta['size']) response.app_iter = common.size_checked_iter( response, image_meta, expected_size, image_iter, self.notifier) # Using app_iter blanks content-length, so we set it here... 
response.headers['Content-Length'] = str(image_meta['size']) response.headers['Content-Type'] = 'application/octet-stream' self._inject_image_meta_headers(response, image_meta) self._inject_checksum_header(response, image_meta) return response def update(self, response, result): image_meta = result['image_meta'] response.body = self.to_json(dict(image=image_meta)) response.headers['Content-Type'] = 'application/json' self._inject_checksum_header(response, image_meta) return response def create(self, response, result): image_meta = result['image_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(image=image_meta)) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create_resource(): """Images resource factory method""" deserializer = ImageDeserializer() serializer = ImageSerializer() return wsgi.Resource(Controller(), deserializer, serializer) glance-12.0.0/glance/api/v3/0000775000567000056710000000000012701407204016513 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/v3/__init__.py0000664000567000056710000000000012701407047020617 0ustar jenkinsjenkins00000000000000glance-12.0.0/glance/api/v3/router.py0000664000567000056710000000564712701407047020426 0ustar jenkinsjenkins00000000000000# Copyright (c) 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from glance.common import wsgi from glance.i18n import _LW CONF = cfg.CONF LOG = logging.getLogger(__name__) """v3 controller stub Since Glance Artifact Service was released in Liberty as experimental Glance v3 API, its router was referenced in paste configuration as glance.api.v3.router In Mitaka the Artifacts Service was moved into a standalone process and its router was renamed to glance.api.artifacts.router. However, in existing deployments the glance-api-paste.ini may still reference the glance.api.v3.router. To not break these deployments this stub is included to redirect the v3 request to glare service (if it is present) or return a 410 otherwise. This stub controller should be removed in future releases. """ class API(wsgi.Router): def __init__(self, mapper): versionutils.report_deprecated_feature( LOG, _LW('/v3 controller is deprecated and will be removed from ' 'glance-api soon. Remove the reference to it from ' 'glance-api-paste.ini configuration file and use Glance ' 'Artifact Service API instead')) redirector = self._get_redirector() mapper.connect(None, "/artifacts", controller=redirector, action='redirect') mapper.connect(None, "/artifacts/{path:.*}", controller=redirector, action='redirect') super(API, self).__init__(mapper) def _get_redirector(self): return wsgi.Resource(RedirectController(), serializer=RedirectResponseSerializer()) class RedirectController(object): def redirect(self, req, path=None): try: glare_endpoint = next((s['endpoints'] for s in req.context.service_catalog if s['type'] == 'artifact'))[0]['publicURL'] if path: path = '/' + path return '{0}/v0.1/artifacts{1}'.format(glare_endpoint, path or "") except StopIteration: return None class RedirectResponseSerializer(wsgi.JSONResponseSerializer): def default(self, response, res): if res: response.location = res response.status_int = 301 else: response.status_int = 410 
glance-12.0.0/babel.cfg0000664000567000056710000000002012701407047015704 0ustar jenkinsjenkins00000000000000[python: **.py] glance-12.0.0/.coveragerc0000664000567000056710000000015412701407047016307 0ustar jenkinsjenkins00000000000000[run] branch = True source = glance omit = glance/tests/*,glance/openstack/* [report] ignore_errors = True glance-12.0.0/setup.cfg0000664000567000056710000000522212701407204016003 0ustar jenkinsjenkins00000000000000[metadata] name = glance summary = OpenStack Image Service description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://docs.openstack.org/developer/glance/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 [files] data_files = etc/glance/metadefs = etc/metadefs/* packages = glance [entry_points] console_scripts = glance-api = glance.cmd.api:main glance-cache-prefetcher = glance.cmd.cache_prefetcher:main glance-cache-pruner = glance.cmd.cache_pruner:main glance-cache-manage = glance.cmd.cache_manage:main glance-cache-cleaner = glance.cmd.cache_cleaner:main glance-control = glance.cmd.control:main glance-manage = glance.cmd.manage:main glance-registry = glance.cmd.registry:main glance-replicator = glance.cmd.replicator:main glance-scrubber = glance.cmd.scrubber:main glance-glare = glance.cmd.glare:main glance.common.image_location_strategy.modules = location_order_strategy = glance.common.location_strategy.location_order store_type_strategy = glance.common.location_strategy.store_type oslo.config.opts = glance.api = glance.opts:list_api_opts glance.registry = glance.opts:list_registry_opts glance.scrubber = glance.opts:list_scrubber_opts glance.cache= glance.opts:list_cache_opts glance.manage = 
glance.opts:list_manage_opts glance.glare = glance.opts:list_artifacts_opts oslo.config.opts.defaults = glance.api = glance.common.config:set_cors_middleware_defaults glance.database.migration_backend = sqlalchemy = oslo_db.sqlalchemy.migration glance.database.metadata_backend = sqlalchemy = glance.db.sqlalchemy.metadata glance.artifacts.types = MyArtifact = glance.contrib.plugins.artifacts_sample:MY_ARTIFACT glance.flows = import = glance.async.flows.base_import:get_flow glance.flows.import = convert = glance.async.flows.convert:get_flow introspect = glance.async.flows.introspect:get_flow ovf_process = glance.async.flows.ovf_process:get_flow [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [compile_catalog] directory = glance/locale domain = glance [update_catalog] domain = glance output_dir = glance/locale input_file = glance/locale/glance.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = glance/locale/glance.pot [pbr] autodoc_tree_index_modules = True glance-12.0.0/tools/0000775000567000056710000000000012701407204015321 5ustar jenkinsjenkins00000000000000glance-12.0.0/tools/install_venv_common.py0000664000567000056710000001350712701407047021762 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. 
Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() glance-12.0.0/tools/install_venv.py0000664000567000056710000000452712701407047020414 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Glance's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv # noqa def print_help(): help = """ Glance development environment setup is complete. Glance development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Glance virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Glance' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() install.run_command([os.path.join(venv, 'bin/python'), 'setup.py', 'develop']) print_help() if __name__ == '__main__': main(sys.argv) glance-12.0.0/tools/with_venv.sh0000775000567000056710000000033212701407047017674 0ustar jenkinsjenkins00000000000000#!/bin/bash TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)} VENV_PATH=${VENV_PATH:-${TOOLS_PATH}} VENV_DIR=${VENV_NAME:-/../.venv} TOOLS=${TOOLS_PATH} VENV=${VENV:-${VENV_PATH}/${VENV_DIR}} source ${VENV}/bin/activate && "$@" glance-12.0.0/tools/migrate_image_owners.py0000664000567000056710000000766712701407047022107 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import keystoneclient.v2_0.client from oslo_config import cfg from oslo_log import log as logging import glance.context import glance.db.sqlalchemy.api as db_api from glance.i18n import _LC, _LE, _LI import glance.registry.context LOG = logging.getLogger(__name__) LOG.addHandler(logging.StreamHandler()) LOG.setLevel(logging.DEBUG) def get_owner_map(ksclient, owner_is_tenant=True): if owner_is_tenant: entities = ksclient.tenants.list() else: entities = ksclient.users.list() # build mapping of (user or tenant) name to id return {entity.name: entity.id for entity in entities} def build_image_owner_map(owner_map, db, context): image_owner_map = {} for image in db.image_get_all(context): image_id = image['id'] owner_name = image['owner'] if not owner_name: LOG.info(_LI('Image %s has no owner. Skipping.') % image_id) continue try: owner_id = owner_map[owner_name] except KeyError: msg = (_LE('Image "%(image)s" owner "%(owner)s" was not found. ' 'Skipping.'), {'image': image_id, 'owner': owner_name}) LOG.error(msg) continue image_owner_map[image_id] = owner_id LOG.info(_LI('Image "%(image)s" owner "%(owner)s" -> "%(owner_id)s"'), {'image': image_id, 'owner': owner_name, 'owner_id': owner_id}) return image_owner_map def update_image_owners(image_owner_map, db, context): for (image_id, image_owner) in image_owner_map.items(): db.image_update(context, image_id, {'owner': image_owner}) LOG.info(_LI('Image %s successfully updated.'), image_id) if __name__ == "__main__": config = cfg.CONF extra_cli_opts = [ cfg.BoolOpt('dry-run', help='Print output but do not make db changes.'), cfg.StrOpt('keystone-auth-uri', help='Authentication endpoint'), cfg.StrOpt('keystone-admin-tenant-name', help='Administrative user\'s tenant name'), cfg.StrOpt('keystone-admin-user', help='Administrative user\'s id'), cfg.StrOpt('keystone-admin-password', help='Administrative user\'s password', secret=True), ] config.register_cli_opts(extra_cli_opts) config(project='glance', 
prog='glance-registry') db_api.configure_db() context = glance.common.context.RequestContext(is_admin=True) auth_uri = config.keystone_auth_uri admin_tenant_name = config.keystone_admin_tenant_name admin_user = config.keystone_admin_user admin_password = config.keystone_admin_password if not (auth_uri and admin_tenant_name and admin_user and admin_password): LOG.critical(_LC('Missing authentication arguments')) sys.exit(1) ks = keystoneclient.v2_0.client.Client(username=admin_user, password=admin_password, tenant_name=admin_tenant_name, auth_url=auth_uri) owner_map = get_owner_map(ks, config.owner_is_tenant) image_updates = build_image_owner_map(owner_map, db_api, context) if not config.dry_run: update_image_owners(image_updates, db_api, context) glance-12.0.0/tools/colorizer.py0000775000567000056710000002664412701407047017727 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2013, Nebula, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Display a subunit stream through a colorized unittest test runner.""" import heapq import sys import unittest import subunit import testtools class _AnsiColorizer(object): """A colorizer is an object that loosely wraps around a stream. That allows callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream @staticmethod def supported(stream=sys.stdout): """Method that checks if the current terminal supports coloring. Returns True or False. 
""" if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. """ color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): import win32console red, green, blue, bold = (win32console.FOREGROUND_RED, win32console.FOREGROUND_GREEN, win32console.FOREGROUND_BLUE, win32console.FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } @staticmethod def supported(stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream @staticmethod def supported(stream=sys.stdout): return True def write(self, text, color): 
self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'red' elif elapsed_time > 0.25: return 'yellow' else: return 'green' class SubunitTestResult(testtools.TestResult): def __init__(self, stream, descriptions, verbosity): super(SubunitTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.num_slow_tests = 10 self.slow_tests = [] # this is a fixed-sized heap self.colorizer = None # NOTE(vish): reset stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = stdout self.start_time = None self.last_time = {} self.results = {} self.last_written = None def _writeElapsedTime(self, elapsed): color = get_elapsed_time_color(elapsed) self.colorizer.write(" %.2f" % elapsed, color) def _addResult(self, test, *args): try: name = test.id() except AttributeError: name = 'Unknown.unknown' test_class, test_name = name.rsplit('.', 1) elapsed = (self._now() - self.start_time).total_seconds() item = (elapsed, test_class, test_name) if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) self.results.setdefault(test_class, []) self.results[test_class].append((test_name, elapsed) + args) self.last_time[test_class] = self._now() self.writeTests() def _writeResult(self, test_name, elapsed, long_result, color, short_result, success): if self.showAll: self.stream.write(' %s' % str(test_name).ljust(66)) self.colorizer.write(long_result, color) if success: self._writeElapsedTime(elapsed) self.stream.writeln() else: self.colorizer.write(short_result, color) def addSuccess(self, test): super(SubunitTestResult, self).addSuccess(test) self._addResult(test, 'OK', 'green', '.', True) def addFailure(self, test, err): if test.id() == 'process-returncode': return 
super(SubunitTestResult, self).addFailure(test, err) self._addResult(test, 'FAIL', 'red', 'F', False) def addError(self, test, err): super(SubunitTestResult, self).addFailure(test, err) self._addResult(test, 'ERROR', 'red', 'E', False) def addSkip(self, test, reason=None, details=None): super(SubunitTestResult, self).addSkip(test, reason, details) self._addResult(test, 'SKIP', 'blue', 'S', True) def startTest(self, test): self.start_time = self._now() super(SubunitTestResult, self).startTest(test) def writeTestCase(self, cls): if not self.results.get(cls): return if cls != self.last_written: self.colorizer.write(cls, 'white') self.stream.writeln() for result in self.results[cls]: self._writeResult(*result) del self.results[cls] self.stream.flush() self.last_written = cls def writeTests(self): time = self.last_time.get(self.last_written, self._now()) if not self.last_written or (self._now() - time).total_seconds() > 2.0: diff = 3.0 while diff > 2.0: classes = self.results.keys() oldest = min(classes, key=lambda x: self.last_time[x]) diff = (self._now() - self.last_time[oldest]).total_seconds() self.writeTestCase(oldest) else: self.writeTestCase(self.last_written) def done(self): self.stopTestRun() def stopTestRun(self): for cls in list(self.results.keys()): self.writeTestCase(cls) self.stream.writeln() self.writeSlowTests() def writeSlowTests(self): # Pare out 'fast' tests slow_tests = [item for item in self.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) slow = ("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) self.colorizer.write(slow, 'yellow') self.stream.writeln() last_cls = None # sort by name for elapsed, cls, name in sorted(slow_tests, key=lambda x: x[1] + x[2]): if cls != last_cls: self.colorizer.write(cls, 'white') self.stream.writeln() last_cls = cls self.stream.write(' %s' % str(name).ljust(68)) self._writeElapsedTime(elapsed) self.stream.writeln() def 
printErrors(self): if self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavor, errors): for test, err in errors: self.colorizer.write("=" * 70, 'red') self.stream.writeln() self.colorizer.write(flavor, 'red') self.stream.writeln(": %s" % test.id()) self.colorizer.write("-" * 70, 'red') self.stream.writeln() self.stream.writeln("%s" % err) test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) if sys.version_info[0:2] <= (2, 6): runner = unittest.TextTestRunner(verbosity=2) else: runner = unittest.TextTestRunner( verbosity=2, resultclass=SubunitTestResult) if runner.run(test).wasSuccessful(): exit_code = 0 else: exit_code = 1 sys.exit(exit_code) glance-12.0.0/doc/0000775000567000056710000000000012701407204014726 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/0000775000567000056710000000000012701407204016226 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/formats.rst0000664000567000056710000000615212701407047020444 0ustar jenkinsjenkins00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Disk and Container Formats ========================== When adding an image to Glance, you must specify what the virtual machine image's *disk format* and *container format* are. Disk and container formats are configurable on a per-deployment basis. 
This document intends to establish a global convention for what specific values of *disk_format* and *container_format* mean. Disk Format ----------- The disk format of a virtual machine image is the format of the underlying disk image. Virtual appliance vendors have different formats for laying out the information contained in a virtual machine disk image. You can set your image's disk format to one of the following: * **raw** This is an unstructured disk image format * **vhd** This is the VHD disk format, a common disk format used by virtual machine monitors from VMWare, Xen, Microsoft, VirtualBox, and others * **vmdk** Another common disk format supported by many common virtual machine monitors * **vdi** A disk format supported by VirtualBox virtual machine monitor and the QEMU emulator * **iso** An archive format for the data contents of an optical disc (e.g. CDROM). * **qcow2** A disk format supported by the QEMU emulator that can expand dynamically and supports Copy on Write * **aki** This indicates what is stored in Glance is an Amazon kernel image * **ari** This indicates what is stored in Glance is an Amazon ramdisk image * **ami** This indicates what is stored in Glance is an Amazon machine image Container Format ---------------- The container format refers to whether the virtual machine image is in a file format that also contains metadata about the actual virtual machine. Note that the container format string is not currently used by Glance or other OpenStack components, so it is safe to simply specify **bare** as the container format if you are unsure. 
You can set your image's container format to one of the following: * **bare** This indicates there is no container or metadata envelope for the image * **ovf** This is the OVF container format * **aki** This indicates what is stored in Glance is an Amazon kernel image * **ari** This indicates what is stored in Glance is an Amazon ramdisk image * **ami** This indicates what is stored in Glance is an Amazon machine image * **ova** This indicates what is stored in Glance is an OVA tar archive file * **docker** This indicates what is stored in Glance is a Docker tar archive of the container filesystem glance-12.0.0/doc/source/property-protections.rst0000664000567000056710000001145412701407047023225 0ustar jenkinsjenkins00000000000000.. Copyright 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Property Protections ==================== There are two types of image properties in Glance: * Core Properties, as specified by the image schema. * Meta Properties, which are arbitrary key/value pairs that can be added to an image. Access to meta properties through Glance's public API calls may be restricted to certain sets of users, using a property protections configuration file. This document explains exactly how property protections are configured and what they apply to. 
Constructing a Property Protections Configuration File ------------------------------------------------------ A property protections configuration file follows the format of the Glance API configuration file, which consists of sections, led by a ``[section]`` header and followed by ``name = value`` entries. Each section header is a regular expression matching a set of properties to be protected. .. note:: Section headers must compile to a valid regular expression, otherwise glance api service will not start. Regular expressions will be handled by python's re module which is PERL like. Each section describes four key-value pairs, where the key is one of ``create/read/update/delete``, and the value is a comma separated list of user roles that are permitted to perform that operation in the Glance API. **If any of the keys are not specified, then the glance api service will not start successfully.** In the list of user roles, ``@`` means all roles and ``!`` means no role. **If both @ and ! are specified for the same rule then the glance api service will not start** .. note:: Only one policy rule is allowed per property operation. **If multiple are specified, then the glance api service will not start.** The path to the file should be specified in the ``[DEFAULT]`` section of ``glance-api.conf`` as follows. :: property_protection_file=/path/to/file If this config value is not specified, property protections are not enforced. **If the path is invalid, glance api service will not start successfully.** The file may use either roles or policies to describe the property protections. The config value should be specified in the ``[DEFAULT]`` section of ``glance-api.conf`` as follows. :: property_protection_rule_format= The default value for ``property_protection_rule_format`` is ``roles``. Property protections are applied in the order specified in the configuration file. 
This means that if for example you specify a section with ``[.*]`` at the top of the file, all proceeding sections will be ignored. If a property does not match any of the given rules, all operations will be disabled for all roles. If an operation is misspelled or omitted, that operation will be disabled for all roles. Disallowing ``read`` operations will also disallow ``update/delete`` operations. A successful HTTP request will return status ``200 OK``. If the user is not permitted to perform the requested action, ``403 Forbidden`` will be returned. V1 API X-glance-registry-Purge-props ------------------------------------ Property protections will still be honoured if ``X-glance-registry-Purge-props`` is set to ``True``. That is, if you request to modify properties with this header set to ``True``, you will not be able to delete or update properties for which you do not have the relevant permissions. Properties which are not included in the request and for which you do have delete permissions will still be removed. Examples -------- **Example 1**. Limit all property interactions to admin only. :: [.*] create = admin read = admin update = admin delete = admin **Example 2**. Allow both admins and users with the billing role to read and modify properties prefixed with ``x_billing_code_``. Allow admins to read and modify any properties. :: [^x_billing_code_.*] create = admin,billing read = admin, billing update = admin,billing delete = admin,billing [.*] create = admin read = admin update = admin delete = admin **Example 3**. Limit all property interactions to admin only using policy rule context_is_admin defined in policy.json. :: [.*] create = context_is_admin read = context_is_admin update = context_is_admin delete = context_is_admin glance-12.0.0/doc/source/index.rst0000664000567000056710000000570012701407047020076 0ustar jenkinsjenkins00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Welcome to Glance's documentation! ================================== The Glance project provides a service where users can upload and discover data assets that are meant to be used with other services. This currently includes images and metadata definitions. Glance image services include discovering, registering, and retrieving virtual machine images. Glance has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. VM images made available through Glance can be stored in a variety of locations from simple filesystems to object-storage systems like the OpenStack Swift project. Glance, as with all OpenStack projects, is written with the following design guidelines in mind: * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open standards**: Be a reference implementation for a community-driven api This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional documentation on Glance and other components of OpenStack can be found on the `OpenStack wiki`_. .. _`OpenStack wiki`: http://wiki.openstack.org Glance Background Concepts ========================== .. 
toctree:: :maxdepth: 1 architecture database_architecture domain_model domain_implementation identifiers statuses tasks formats common-image-properties metadefs-concepts Installing/Configuring Glance ============================= .. toctree:: :maxdepth: 1 installing configuring authentication policies image-location-strategy-modules artifact-types flows property-protections opts/index Operating Glance ================ .. toctree:: :maxdepth: 1 controllingservers db cache notifications Using Glance ============ .. toctree:: :maxdepth: 1 glanceapi glanceclient glancemetadefcatalogapi Glance Manual Pages =================== .. toctree:: :glob: :maxdepth: 1 man/* api/modules Contributing to Glance ====================== .. toctree:: :maxdepth: 1 contributing/index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`glance-12.0.0/doc/source/domain_model.rst0000664000567000056710000002355612701407047021427 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============ Domain model ============ The main goal of a domain model is refactoring the logic around object manipulation by splitting it to independent layers. Each subsequent layer wraps the previous one creating an "onion" structure, thus realizing a design pattern called "Decorator." The main feature of domain model is to use a composition instead of inheritance or basic decoration while building an architecture. 
This provides flexibility and transparency of an internal organization for a developer, because he does not know what layers are used and works with a domain model object as with a common object. Inner architecture ~~~~~~~~~~~~~~~~~~ Each layer defines its own operations’ implementation through a special ``proxy`` class. At first, operations are performed on the upper layer, then they successively pass the control to the underlying layers. The nesting of layers can be specified explicitly using a programmer interface Gateway or implicitly using ``helper`` classes. Nesting may also depend on various conditions, skipping or adding additional layers during domain object creation. Proxies ~~~~~~~ The layer behavior is described in special ``proxy`` classes that must provide exactly the same interface as the original class does. In addition, each ``proxy`` class has a field ``base`` indicating a lower layer object that is an instance of another ``proxy`` or ``original`` class. To access the rest of the fields, you can use special ``proxy`` properties or universal methods ``set_property`` and ``get_property``. In addition, the ``proxy`` class must have an ``__init__`` format method:: def __init__(self, base, helper_class=None, helper_kwargs=None, **kwargs) where ``base`` corresponds to the underlying object layer, ``proxy_class`` and ``proxy_kwargs`` are optional and are used to create a ``helper`` class. 
Thus, to access a ``meth1`` method from the underlying layer, it is enough to call it on the ``base`` object:: def meth1(*args, **kwargs): … self.base.meth1(*args, **kwargs) … To get access to the domain object field, it is recommended to use properties that are created by an auxiliary function:: def _create_property_proxy(attr): def get_attr(self): return getattr(self.base, attr) def set_attr(self, value): return setattr(self.base, attr, value) def del_attr(self): return delattr(self.base, attr) return property(get_attr, set_attr, del_attr) So, the reference to the underlying layer field ``prop1`` looks like:: class Proxy(object): … prop1 = _create_property_proxy('prop1') … If the number of layers is big, it is reasonable to create a common parent ``proxy`` class that provides further control transfer. This facilitates the writing of specific layers if they do not provide a particular implementation of some operation. Gateway ~~~~~~~ ``gateway`` is a mechanism to explicitly specify a composition of the domain model layers. It defines an interface to retrieve the domain model object based on the ``proxy`` classes described above. Example of the gateway implementation ------------------------------------- This example defines three classes: * ``Base`` is the main class that sets an interface for all the ``proxy`` classes. * ``LoggerProxy`` class implements additional logic associated with the logging of messages from the ``print_msg`` method. * ``ValidatorProxy`` class implements an optional check that helps to determine whether all the parameters in the ``sum_numbers`` method are positive. :: class Base(object): ""Base class in domain model.""" msg = "Hello Domain" def print_msg(self): print(self.msg) def sum_numbers(self, *args): return sum(args) class LoggerProxy(object): """"Class extends functionality by writing message to log.""" def __init__(self, base, logg): self.base = base self.logg = logg # Proxy to provide implicit access to inner layer. 
msg = _create_property_proxy('msg') def print_msg(self): # Write message to log and then pass the control to inner layer. self.logg.write("Message %s has been written to the log") % self.msg self.base.print_msg() def sum_numbers(self, *args): # Nothing to do here. Just pass the control to the next layer. return self.base.sum_numbers(*args) class ValidatorProxy(object): """Class validates that input parameters are correct.""" def __init__(self, base): self.base = base msg = _create_property_proxy('msg') def print_msg(self): # There are no checks. self.base.print_msg() def sum_numbers(self, *args): # Validate input numbers and pass them further. for arg in args: if arg <= 0: return "Only positive numbers are supported." return self.base.sum_numbers(*args) Thus, the ``gateway`` method for the above example may look like: :: def gateway(logg, only_positive=True): base = Base() logger = LoggerProxy(base, logg) if only_positive: return ValidatorProxy(logger) return logger domain_object = gateway(sys.stdout, only_positive=True) It is important to consider that the order of the layers matters. And even if layers are logically independent from each other, rearranging them in different order may lead to another result. Helpers ~~~~~~~ ``Helper`` objects are used for an implicit nesting assignment that is based on a specification described in an auxiliary method (similar to ``gateway``). This approach may be helpful when using a *simple factory* for generating objects. Such a way is more flexible as it allows specifying the wrappers dynamically. 
The ``helper`` class is unique for all the ``proxy`` classes and it has the following form: :: class Helper(object): def __init__(self, proxy_class=None, proxy_kwargs=None): self.proxy_class = proxy_class self.proxy_kwargs = proxy_kwargs or {} def proxy(self, obj): """Wrap an object.""" if obj is None or self.proxy_class is None: return obj return self.proxy_class(obj, **self.proxy_kwargs) def unproxy(self, obj): """Return object from inner layer.""" if obj is None or self.proxy_class is None: return obj return obj.base Example of a simple factory implementation ------------------------------------------ Here is a code of a *simple factory* for generating objects from the previous example. It specifies a ``BaseFactory`` class with a ``generate`` method and related ``proxy`` classes: :: class BaseFactory(object): """Simple factory to generate an object.""" def generate(self): return Base() class LoggerFactory(object): """Proxy class to add logging functionality.""" def __init__(self, base, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base self.logg = logg def generate(self): return self.helper.proxy(self.base.generate()) class ValidatorFactory(object): """Proxy class to add validation.""" def __init__(self, base, only_positive=True, proxy_class=None, proxy_kwargs=None): self.helper = Helper(proxy_class, proxy_kwargs) self.base = base self.only_positive = only_positive def generate(self): if self.only_positive: # Wrap in ValidatorProxy if required. 
return self.helper.proxy(self.base.generate()) return self.base.generate() Further, ``BaseFactory`` and related ``proxy`` classes are combined together: :: def create_factory(logg, only_positive=True): base_factory = BaseFactory() logger_factory = LoggerFactory(base_factory, proxy_class=LoggerProxy, proxy_kwargs=dict(logg=logg)) validator_factory = ValidatorFactory(logger_factory, only_positive, proxy_class = ValidatorProxy) return validator_factory Ultimately, to generate a domain object, you create and run a factory method ``generate`` which implicitly creates a composite object. This method is based on specifications that are set forth in the ``proxy`` class. :: factory = create_factory(logg, only_positive=False) domain_object = factory.generate() Why do you need a domain if you can use decorators? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the above examples, to implement the planned logic, it is quite possible to use standard Python language techniques such as decorators. However, to implement more complicated operations, the domain model is reasonable and justified. In general, the domain is useful when: * there are more than three layers. In such case, the domain model usage facilitates the understanding and supporting of the code; * wrapping must be implemented depending on some conditions, including dynamic wrapping; * there is a requirement to wrap objects implicitly by helpers. glance-12.0.0/doc/source/contributing/0000775000567000056710000000000012701407204020735 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/contributing/index.rst0000664000567000056710000000070512701407051022600 0ustar jenkinsjenkins00000000000000Glance Contribution Guidelines ============================== In the Contributions Guide, you will find documented policies for developing with Glance. This includes the processes we use for blueprints and specs, bugs, contributor onboarding, core reviewer memberships, and other procedural items. Policies -------- .. 
toctree:: :maxdepth: 3 blueprints .. bugs contributor-onboarding core-reviewers gate-failure-triage code-reviews glance-12.0.0/doc/source/contributing/blueprints.rst0000664000567000056710000001446112701407051023664 0ustar jenkinsjenkins00000000000000Blueprints and Specs ==================== The Glance team uses the `glance-specs `_ repository for its specification reviews. Detailed information can be found `here `_. Please also find additional information in the reviews.rst file. The Glance team enforces a deadline for specs proposals. It's a soft freeze that happens after the first milestone is cut and before the second milestone is out. There's a freeze exception week that follows the freeze week. A new proposal can still be submitted during this period, but be aware that it will most likely be postponed unless a particularly good argument is made in favor of having an exception for it. Please note that we use a `template `_ for spec submissions. It is not required to fill out all sections in the template. Review of the spec may require filling in information left out by the submitter. Spec Notes ---------- There are occasions when a spec will be approved and the code will not land in the cycle it was targeted at. For these cases, the work flow to get the spec into the next release is as follows: * Anyone can propose a patch to glance-specs which moves a spec from the previous release into the new release directory. .. NOTE: mention the `approved`, `implemented` dirs The specs which are moved in this way can be fast-tracked into the next release. Please note that it is required to re-propose the spec for the new release however and that it'll be evaluated based on the resources available and cycle priorities. Glance Spec Lite ---------------- In Mitaka the team introduced the concept of lite specs. 
Lite specs are small features tracked as Launchpad bugs, with status `wishlist` and tagged with the new 'spec-lite' tag, and allow for the submission and review of these feature requests before code is submitted. This allows for small features that don't warrant a detailed spec to be proposed, evaluated, and worked on. The team evaluates these requests as it evaluates specs. Once a bug has been approved as a Request for Enhancement (RFE), it'll be targeted for a release. The workflow for the life of a spec-lite in Launchpad is as follows: * File a bug with a small summary of what the request change is following the format below: .. NOTE: add format * The bug is triaged and tagged with the `spec-lite` tag. * The bug is evaluated and marked as `Triaged` to announce approval or to `Won't fix` to announce rejection or `Invalid` to request a full spec. * The bug is moved to `In Progress` once the code is up and ready to review. * The bug is moved to `Fix Committed` once the patch lands. In summary: +--------------+-----------------------------------------------------------------------------+ |State | Meaning | +==============+=============================================================================+ |New | This is where spec-lite starts, as filed by the community. | +--------------+-----------------------------------------------------------------------------+ |Triaged | Drivers - Move to this state to mean, "you can start working on it" | +--------------+-----------------------------------------------------------------------------+ |Won't Fix | Drivers - Move to this state to reject a lite-spec. 
| +--------------+-----------------------------------------------------------------------------+ |Invalid | Drivers - Move to this state to request a full spec for this request | +--------------+-----------------------------------------------------------------------------+ The drivers team will be discussing the following bug reports during their IRC meeting: * `New RFE's `_ * `New RFE's `_ * `New RFE's `_ Lite spec Submission Guidelines ------------------------------- Before we dive into the guidelines for writing a good lite spec, it is worth mentioning that depending on your level of engagement with the Glance project and your role (user, developer, deployer, operator, etc.), you are more than welcome to have a preliminary discussion of a potential lite spec by reaching out to other people involved in the project. This usually happens by posting mails on the relevant mailing lists (e.g. `openstack-dev `_ - include [glance] in the subject) or on #openstack-glance IRC channel on Freenode. If current ongoing code reviews are related to your feature, posting comments/questions on gerrit may also be a way to engage. Some amount of interaction with Glance developers will give you an idea of the plausibility and form of your lite spec before you submit it. That said, this is not mandatory. When you submit a bug report on https://bugs.launchpad.net/glance/+filebug, there are two fields that must be filled: 'summary' and 'further information'. The 'summary' must be brief enough to fit in one line: if you can't describe it in a few words it may mean that you are either trying to capture more than one lite spec at once, or that you are having a hard time defining what you are trying to solve at all. The 'further information' section must be a description of what you would like to see implemented in Glance. The description should provide enough details for a knowledgeable developer to understand what is the existing problem and what's the proposed solution. 
Once you are happy with what you wrote, set the importance to `Wishlist`, and submit. Do not worry, we are here to help you get it right! Happy hacking. Lite spec from existing bugs ---------------------------- If there's an already existing bug that describes a small feature suitable for a spec-lite, all you need to do is change the importance field to `Wishlist`. Please don't create a new bug! The comments and history of the existing bug are important for the spec-lite review. glance-12.0.0/doc/source/policies.rst0000664000567000056710000001306612701407047020602 0ustar jenkinsjenkins00000000000000.. Copyright 2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Policies ======== Glance's public API calls may be restricted to certain sets of users using a policy configuration file. This document explains exactly how policies are configured and what they apply to. A policy is composed of a set of rules that are used by the policy "Brain" in determining if a particular action may be performed by the authorized tenant. Constructing a Policy Configuration File ---------------------------------------- A policy configuration file is a simply JSON object that contain sets of rules. Each top-level key is the name of a rule. Each rule is a string that describes an action that may be performed in the Glance API. 
The actions that may have a rule enforced on them are: * ``get_images`` - List available image entities * ``GET /v1/images`` * ``GET /v1/images/detail`` * ``GET /v2/images`` * ``get_image`` - Retrieve a specific image entity * ``HEAD /v1/images/`` * ``GET /v1/images/`` * ``GET /v2/images/`` * ``download_image`` - Download binary image data * ``GET /v1/images/`` * ``GET /v2/images//file`` * ``upload_image`` - Upload binary image data * ``POST /v1/images`` * ``PUT /v1/images/`` * ``PUT /v2/images//file`` * ``copy_from`` - Copy binary image data from URL * ``POST /v1/images`` * ``PUT /v1/images/`` * ``add_image`` - Create an image entity * ``POST /v1/images`` * ``POST /v2/images`` * ``modify_image`` - Update an image entity * ``PUT /v1/images/`` * ``PUT /v2/images/`` * ``publicize_image`` - Create or update images with attribute * ``POST /v1/images`` with attribute ``is_public`` = ``true`` * ``PUT /v1/images/`` with attribute ``is_public`` = ``true`` * ``POST /v2/images`` with attribute ``visibility`` = ``public`` * ``PUT /v2/images/`` with attribute ``visibility`` = ``public`` * ``delete_image`` - Delete an image entity and associated binary data * ``DELETE /v1/images/`` * ``DELETE /v2/images/`` * ``add_member`` - Add a membership to the member repo of an image * ``POST /v2/images//members`` * ``get_members`` - List the members of an image * ``GET /v1/images//members`` * ``GET /v2/images//members`` * ``delete_member`` - Delete a membership of an image * ``DELETE /v1/images//members/`` * ``DELETE /v2/images//members/`` * ``modify_member`` - Create or update the membership of an image * ``PUT /v1/images//members/`` * ``PUT /v1/images//members`` * ``POST /v2/images//members`` * ``PUT /v2/images//members/`` * ``manage_image_cache`` - Allowed to use the image cache management API To limit an action to a particular role or roles, you list the roles like so :: { "delete_image": ["role:admin", "role:superuser"] } The above would add a rule that only allowed users that had 
roles of either "admin" or "superuser" to delete an image. Writing Rules ------------- Role checks are going to continue to work exactly as they already do. If the role defined in the check is one that the user holds, then that will pass, e.g., ``role:admin``. To write a generic rule, you need to know that there are three values provided by Glance that can be used in a rule on the left side of the colon (``:``). Those values are the current user's credentials in the form of: - role - tenant - owner The left side of the colon can also contain any value that Python can understand, e.g.,: - ``True`` - ``False`` - ``"a string"`` - &c. Using ``tenant`` and ``owner`` will only work with images. Consider the following rule:: tenant:%(owner)s This will use the ``tenant`` value of the currently authenticated user. It will also use ``owner`` from the image it is acting upon. If those two values are equivalent the check will pass. All attributes on an image (as well as extra image properties) are available for use on the right side of the colon. The most useful are the following: - ``owner`` - ``protected`` - ``is_public`` Therefore, you could construct a set of rules like the following:: { "not_protected": "False:%(protected)s", "is_owner": "tenant:%(owner)s", "is_owner_or_admin": "rule:is_owner or role:admin", "not_protected_and_is_owner": "rule:not_protected and rule:is_owner", "get_image": "rule:is_owner_or_admin", "delete_image": "rule:not_protected_and_is_owner", "add_member": "rule:not_protected_and_is_owner" } Examples -------- Example 1. (The default policy configuration) :: { "default": "" } Note that an empty JSON list means that all methods of the Glance API are callable by anyone. Example 2. 
Disallow modification calls to non-admins :: { "default": "", "add_image": "role:admin", "modify_image": "role:admin", "delete_image": "role:admin" } glance-12.0.0/doc/source/common-image-properties.rst0000664000567000056710000000356712701407047023542 0ustar jenkinsjenkins00000000000000.. Copyright 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Common Image Properties ======================= When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image. This document explains the names of these properties and the expected values. The common image properties are also described in a JSON schema, found in etc/schema-image.json in the Glance source code. **architecture** ---------------- Operating system architecture as specified in http://docs.openstack.org/cli-reference/glance.html#image-service-property-keys **instance_uuid** ----------------- Metadata which can be used to record which instance this image is associated with. (Informational only, does not create an instance snapshot.) **kernel_id** ------------- The ID of image stored in Glance that should be used as the kernel when booting an AMI-style image. **ramdisk_id** -------------- The ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image. 
**os_distro** ------------- The common name of the operating system distribution as specified in http://docs.openstack.org/cli-reference/glance.html#image-service-property-keys **os_version** -------------- The operating system version as specified by the distributor. glance-12.0.0/doc/source/glanceclient.rst0000664000567000056710000000210612701407047021414 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Using Glance's Client Tools =========================== The command-line tool and python library for Glance are both installed through the python-glanceclient project. Explore the following resources for more information: * `Official Docs `_ * `Pypi Page `_ * `GitHub Project `_ glance-12.0.0/doc/source/identifiers.rst0000664000567000056710000000201412701407047021267 0ustar jenkinsjenkins00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Image Identifiers ================= Images are uniquely identified by way of a URI that matches the following signature:: /v1/images/ where `` is the resource location of the Glance service that knows about an image, and `` is the image's identifier. Image identifiers in Glance are *uuids*, making them *globally unique*. glance-12.0.0/doc/source/domain_implementation.rst0000664000567000056710000000766612701407047023360 0ustar jenkinsjenkins00000000000000.. Copyright 2016 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================== Glance domain model implementation ================================== Gateway and basic layers ~~~~~~~~~~~~~~~~~~~~~~~~ The domain model contains the following layers: #. :ref:`authorization` #. :ref:`notifier` #. :ref:`property` #. :ref:`policy` #. :ref:`quota` #. :ref:`location` #. :ref:`database` The schema below shows a stack that contains the Image domain layers and their locations: .. figure:: /images/glance_layers.png :figwidth: 100% :align: center :alt: Image domain layers .. _authorization: Authorization ------------- The first layer of the domain model provides a verification of whether an image itself or its property can be changed. An admin or image owner can apply the changes. The information about a user is taken from the request ``context`` and is compared with the image ``owner``. If the user cannot apply a change, a corresponding error message appears. .. 
_property: Property protection ------------------- The second layer of the domain model is optional. It becomes available if you set the ``property_protection_file`` parameter in the Glance configuration file. There are two types of image properties in Glance: * *Core properties*, as specified in the image schema * *Meta properties*, which are the arbitrary key/value pairs that can be added to an image The property protection layer manages access to the meta properties through Glance’s public API calls. You can restrict the access in the property protection configuration file. .. _notifier: Notifier -------- On the third layer of the domain model, the following items are added to the message queue: #. Notifications about all of the image changes #. All of the exceptions and warnings that occurred while using an image .. _policy: Policy ------ The fourth layer of the domain model is responsible for: #. Defining access rules to perform actions with an image. The rules are defined in the :file:`etc/policy.json` file. #. Monitoring of the rules implementation. .. _quota: Quota ----- On the fifth layer of the domain model, if a user has an admin-defined size quota for all of his uploaded images, there is a check that verifies whether this quota exceeds the limit during an image upload and save: * If the quota does not exceed the limit, then the action to add an image succeeds. * If the quota exceeds the limit, then the action does not succeed and a corresponding error message appears. .. _location: Location -------- The sixth layer of the domain model is used for interaction with the store via the ``glance_store`` library, like upload and download, and for managing an image location. On this layer, an image is validated before the upload. If the validation succeeds, an image is written to the ``glance_store`` library. This sixth layer of the domain model is responsible for: #. Checking whether a location URI is correct when a new location is added #. 
Removing image data from the store when an image location is changed #. Preventing image location duplicates .. _database: Database -------- On the seventh layer of the domain model: * The methods to interact with the database API are implemented. * Images are converted to the corresponding format to be recorded in the database. And the information received from the database is converted to an Image object. glance-12.0.0/doc/source/man/0000775000567000056710000000000012701407204017001 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/man/footer.rst0000664000567000056710000000032312701407047021034 0ustar jenkinsjenkins00000000000000SEE ALSO ======== * `OpenStack Glance `__ BUGS ==== * Glance bugs are tracked in Launchpad so you can view current bugs at `OpenStack Glance `__ glance-12.0.0/doc/source/man/glancescrubber.rst0000664000567000056710000000327212701407047022525 0ustar jenkinsjenkins00000000000000=============== glance-scrubber =============== -------------------- Glance scrub service -------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-scrubber [options] DESCRIPTION =========== glance-scrubber is a utility that cleans up images that have been deleted. The mechanics of this differ depending on the backend store and pending_deletion options chosen. Multiple glance-scrubbers can be run in a single deployment, but only one of them may be designated as the 'cleanup_scrubber' in the glance-scrubber.conf file. The 'cleanup_scrubber' coordinates other glance-scrubbers by maintaining the master queue of images that need to be removed. The glance-scubber.conf file also specifies important configuration items such as the time between runs ('wakeup_time' in seconds), length of time images can be pending before their deletion ('cleanup_scrubber_time' in seconds) as well as registry connectivity options. 
glance-scrubber can run as a periodic job or long-running daemon. OPTIONS ======= **General options** .. include:: general_options.rst **-D, --daemon** Run as a long-running process. When not specified (the default) run the scrub operation once and then exits. When specified do not exit and run scrub on wakeup_time interval as specified in the config. **--nodaemon** The inverse of --daemon. Runs the scrub operation once and then exits. This is the default. FILES ====== **/etc/glance/glance-scrubber.conf** Default configuration file for the Glance Scrubber .. include:: footer.rst glance-12.0.0/doc/source/man/glancecacheprefetcher.rst0000664000567000056710000000126312701407047024027 0ustar jenkinsjenkins00000000000000======================= glance-cache-prefetcher ======================= ------------------------------ Glance Image Cache Pre-fetcher ------------------------------ :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-cache-prefetcher [options] DESCRIPTION =========== This is meant to be run from the command line after queueing images to be pretched. OPTIONS ======= **General options** .. include:: general_options.rst FILES ===== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. 
include:: footer.rst glance-12.0.0/doc/source/man/glancecachecleaner.rst0000664000567000056710000000214012701407051023277 0ustar jenkinsjenkins00000000000000==================== glance-cache-cleaner ==================== ---------------------------------------------------------------- Glance Image Cache Invalid Cache Entry and Stalled Image cleaner ---------------------------------------------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-cache-cleaner [options] DESCRIPTION =========== This is meant to be run as a periodic task from cron. If something goes wrong while we're caching an image (for example the fetch times out, or an exception is raised), we create an 'invalid' entry. These entires are left around for debugging purposes. However, after some period of time, we want to clean these up. Also, if an incomplete image hangs around past the image_cache_stall_time period, we automatically sweep it up. OPTIONS ======= **General options** .. include:: general_options.rst FILES ====== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. include:: footer.rst glance-12.0.0/doc/source/man/glancecachepruner.rst0000664000567000056710000000135512701407047023215 0ustar jenkinsjenkins00000000000000=================== glance-cache-pruner =================== ------------------- Glance cache pruner ------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-cache-pruner [options] DESCRIPTION =========== Prunes images from the Glance cache when the space exceeds the value set in the image_cache_max_size configuration option. This is meant to be run as a periodic task, perhaps every half-hour. OPTIONS ======== **General options** .. 
include:: general_options.rst FILES ===== **/etc/glance/glance-cache.conf** Default configuration file for the Glance Cache .. include:: footer.rst glance-12.0.0/doc/source/man/general_options.rst0000664000567000056710000000453512701407051022732 0ustar jenkinsjenkins00000000000000 **-h, --help** Show the help message and exit **--version** Print the version number and exit **-v, --verbose** Print more verbose output **--noverbose** Disable verbose output **-d, --debug** Print debugging output (set logging level to DEBUG instead of default WARNING level) **--nodebug** Disable debugging output **--use-syslog** Use syslog for logging **--nouse-syslog** Disable the use of syslog for logging **--syslog-log-facility SYSLOG_LOG_FACILITY** syslog facility to receive log lines **--config-dir DIR** Path to a config directory to pull \*.conf files from. This file set is sorted, to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. This means that configuration from files in a specified config-dir will always take precedence over configuration from files specified by --config-file, regardless to argument order. **--config-file PATH** Path to a config file to use. Multiple config files can be specified by using this flag multiple times, for example, --config-file --config-file . Values in latter files take precedence. **--log-config-append PATH** **--log-config PATH** The name of logging configuration file. It does not disable existing loggers, but just appends specified logging configuration to any other existing logging options. Please see the Python logging module documentation for details on logging configuration files. The log-config name for this option is depcrecated. **--log-format FORMAT** A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. 
Default: None **--log-date-format DATE_FORMAT** Format string for %(asctime)s in log records. Default: None **--log-file PATH, --logfile PATH** (Optional) Name of log file to output to. If not set, logging will go to stdout. **--log-dir LOG_DIR, --logdir LOG_DIR** (Optional) The directory to keep log files in (will be prepended to --log-file) glance-12.0.0/doc/source/man/glancecontrol.rst0000664000567000056710000000233612701407047022376 0ustar jenkinsjenkins00000000000000============== glance-control ============== -------------------------------------- Glance daemon start/stop/reload helper -------------------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-control [options] [CONFPATH] Where is one of: all, api, glance-api, registry, glance-registry, scrubber, glance-scrubber And command is one of: start, status, stop, shutdown, restart, reload, force-reload And CONFPATH is the optional configuration file to use. OPTIONS ======== **General Options** .. include:: general_options.rst **--pid-file=PATH** File to use as pid file. Default: /var/run/glance/$server.pid **--await-child DELAY** Period to wait for service death in order to report exit code (default is to not wait at all) **--capture-output** Capture stdout/err in syslog instead of discarding **--nocapture-output** The inverse of --capture-output **--norespawn** The inverse of --respawn **--respawn** Restart service on unexpected death .. 
include:: footer.rst glance-12.0.0/doc/source/man/glanceapi.rst0000664000567000056710000000116312701407047021464 0ustar jenkinsjenkins00000000000000========== glance-api ========== --------------------------------------- Server for the Glance Image Service API --------------------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-api [options] DESCRIPTION =========== glance-api is a server daemon that serves the Glance API OPTIONS ======= **General options** .. include:: general_options.rst FILES ===== **/etc/glance/glance-api.conf** Default configuration file for Glance API .. include:: footer.rst glance-12.0.0/doc/source/man/glancemanage.rst0000664000567000056710000000512412701407047022144 0ustar jenkinsjenkins00000000000000============= glance-manage ============= ------------------------- Glance Management Utility ------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-manage [options] DESCRIPTION =========== glance-manage is a utility for managing and configuring a Glance installation. One important use of glance-manage is to setup the database. To do this run:: glance-manage db_sync Note: glance-manage commands can be run either like this:: glance-manage db sync or with the db commands concatenated, like this:: glance-manage db_sync COMMANDS ======== **db** This is the prefix for the commands below when used with a space rather than a _. For example "db version". **db_version** This will print the current migration level of a glance database. **db_upgrade ** This will take an existing database and upgrade it to the specified VERSION. **db_downgrade ** This will take an existing database and downgrade it to the specified VERSION. 
**db_version_control** Place the database under migration control. **db_sync ** Place a database under migration control and upgrade, creating it first if necessary. **db_export_metadefs** Export the metadata definitions into json format. By default the definitions are exported to /etc/glance/metadefs directory. **db_load_metadefs** Load the metadata definitions into glance database. By default the definitions are imported from /etc/glance/metadefs directory. **db_unload_metadefs** Unload the metadata definitions. Clears the contents of all the glance db tables including metadef_namespace_resource_types, metadef_tags, metadef_objects, metadef_resource_types, metadef_namespaces and metadef_properties. OPTIONS ======= **General Options** .. include:: general_options.rst **--sql_connection=CONN_STRING** A proper SQLAlchemy connection string as described `here `_ .. include:: footer.rst CONFIGURATION ============= The following paths are searched for a ``glance-manage.conf`` file in the following order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` All options set in ``glance-manage.conf`` override those set in ``glance-registry.conf`` and ``glance-api.conf``. glance-12.0.0/doc/source/man/glancereplicator.rst0000664000567000056710000000424412701407047023062 0ustar jenkinsjenkins00000000000000================= glance-replicator ================= --------------------------------------------- Replicate images across multiple data centers --------------------------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-replicator [options] [args] DESCRIPTION =========== glance-replicator is a utility can be used to populate a new glance server using the images stored in an existing glance server. The images in the replicated glance server preserve the uuids, metadata, and image data from the original. 
COMMANDS ======== **help ** Output help for one of the commands below **compare** What is missing from the slave glance? **dump** Dump the contents of a glance instance to local disk. **livecopy** Load the contents of one glance instance into another. **load** Load the contents of a local directory into glance. **size** Determine the size of a glance instance if dumped to disk. OPTIONS ======= **-h, --help** Show this help message and exit **-c CHUNKSIZE, --chunksize=CHUNKSIZE** Amount of data to transfer per HTTP write **-d, --debug** Print debugging information **-D DONTREPLICATE, --dontreplicate=DONTREPLICATE** List of fields to not replicate **-m, --metaonly** Only replicate metadata, not images **-l LOGFILE, --logfile=LOGFILE** Path of file to log to **-s, --syslog** Log to syslog instead of a file **-t TOKEN, --token=TOKEN** Pass in your authentication token if you have one. If you use this option the same token is used for both the master and the slave. **-M MASTERTOKEN, --mastertoken=MASTERTOKEN** Pass in your authentication token if you have one. This is the token used for the master. **-S SLAVETOKEN, --slavetoken=SLAVETOKEN** Pass in your authentication token if you have one. This is the token used for the slave. **-v, --verbose** Print more verbose output .. 
include:: footer.rst glance-12.0.0/doc/source/man/glancecachemanage.rst0000664000567000056710000000366412701407047023137 0ustar jenkinsjenkins00000000000000=================== glance-cache-manage =================== ------------------------ Cache management utility ------------------------ :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-cache-manage [options] [args] COMMANDS ======== **help ** Output help for one of the commands below **list-cached** List all images currently cached **list-queued** List all images currently queued for caching **queue-image** Queue an image for caching **delete-cached-image** Purges an image from the cache **delete-all-cached-images** Removes all images from the cache **delete-queued-image** Deletes an image from the cache queue **delete-all-queued-images** Deletes all images from the cache queue OPTIONS ======= **--version** show program's version number and exit **-h, --help** show this help message and exit **-v, --verbose** Print more verbose output **-d, --debug** Print more verbose output **-H ADDRESS, --host=ADDRESS** Address of Glance API host. Default: 0.0.0.0 **-p PORT, --port=PORT** Port the Glance API host listens on. Default: 9292 **-k, --insecure** Explicitly allow glance to perform "insecure" SSL (https) requests. The server's certificate will not be verified against any certificate authorities. This option should be used with caution. **-A TOKEN, --auth_token=TOKEN** Authentication token to use to identify the client to the glance server **-f, --force** Prevent select actions from requesting user confirmation **-S STRATEGY, --os-auth-strategy=STRATEGY** Authentication strategy (keystone or noauth) .. include:: openstack_options.rst .. 
include:: footer.rst glance-12.0.0/doc/source/man/glanceregistry.rst0000664000567000056710000000125412701407047022564 0ustar jenkinsjenkins00000000000000=============== glance-registry =============== -------------------------------------- Server for the Glance Registry Service -------------------------------------- :Author: glance@lists.launchpad.net :Date: 2014-01-16 :Copyright: OpenStack LLC :Version: 2014.1 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== glance-registry [options] DESCRIPTION =========== glance-registry is a server daemon that serves image metadata through a REST-like API. OPTIONS ======= **General options** .. include:: general_options.rst FILES ===== **/etc/glance/glance-registry.conf** Default configuration file for Glance Registry .. include:: footer.rst glance-12.0.0/doc/source/man/openstack_options.rst0000664000567000056710000000100512701407047023276 0ustar jenkinsjenkins00000000000000 **-os-auth-token=OS_AUTH_TOKEN** Defaults to env[OS_AUTH_TOKEN] **--os-username=OS_USERNAME** Defaults to env[OS_USERNAME] **--os-password=OS_PASSWORD** Defaults to env[OS_PASSWORD] **--os-region-name=OS_REGION_NAME** Defaults to env[OS_REGION_NAME] **--os-tenant-id=OS_TENANT_ID** Defaults to env[OS_TENANT_ID] **--os-tenant-name=OS_TENANT_NAME** Defaults to env[OS_TENANT_NAME] **--os-auth-url=OS_AUTH_URL** Defaults to env[OS_AUTH_URL] glance-12.0.0/doc/source/configuring.rst0000664000567000056710000016260512701407051021304 0ustar jenkinsjenkins00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Basic Configuration =================== Glance has a number of options that you can use to configure the Glance API server, the Glance Registry server, and the various storage backends that Glance can use to store images. Most configuration is done via configuration files, with the Glance API server and Glance Registry server using separate configuration files. When starting up a Glance server, you can specify the configuration file to use (see :doc:`the documentation on controller Glance servers `). If you do **not** specify a configuration file, Glance will look in the following directories for a configuration file, in order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The Glance API server configuration file should be named ``glance-api.conf``. Similarly, the Glance Registry server configuration file should be named ``glance-registry.conf``. If you installed Glance via your operating system's package management system, it is likely that you will have sample configuration files installed in ``/etc/glance``. In addition to this documentation page, you can check the ``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample configuration files distributed with Glance for example configuration files for each server application with detailed comments on what each options does. The PasteDeploy configuration (controlling the deployment of the WSGI application for each component) may be found by default in -paste.ini alongside the main configuration file, .conf. For example, ``glance-api-paste.ini`` corresponds to ``glance-api.conf``. This pathname for the paste config is configurable, as follows:: [paste_deploy] config_file = /path/to/paste/config Common Configuration Options in Glance -------------------------------------- Glance has a few command-line options that are common to all Glance programs: * ``--verbose`` Optional. 
Default: ``False`` Can be specified on the command line and in configuration files. Turns on the INFO level in logging and prints more verbose command-line interface printouts. * ``--debug`` Optional. Default: ``False`` Can be specified on the command line and in configuration files. Turns on the DEBUG level in logging. * ``--config-file=PATH`` Optional. Default: See below for default search order. Specified on the command line only. Takes a path to a configuration file to use when running the program. If this CLI option is not specified, then we check to see if the first argument is a file. If it is, then we try to use that as the configuration file. If there is no file or there were no arguments, we search for a configuration file in the following order: * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The filename that is searched for depends on the server application name. So, if you are starting up the API server, ``glance-api.conf`` is searched for, otherwise ``glance-registry.conf``. * ``--config-dir=DIR`` Optional. Default: ``None`` Specified on the command line only. Takes a path to a configuration directory from which all \*.conf fragments are loaded. This provides an alternative to multiple --config-file options when it is inconvenient to explicitly enumerate all the config files, for example when an unknown number of config fragments are being generated by a deployment framework. If --config-dir is set, then --config-file is ignored. An example usage would be: $ glance-api --config-dir=/etc/glance/glance-api.d $ ls /etc/glance/glance-api.d 00-core.conf 01-s3.conf 02-swift.conf 03-ssl.conf ... etc. The numeric prefixes in the example above are only necessary if a specific parse ordering is required (i.e. if an individual config option set in an earlier fragment is overridden in a later fragment). 
Note that ``glance-manage`` currently loads configuration from three files: * ``glance-registry.conf`` * ``glance-api.conf`` * and the newly created ``glance-manage.conf`` By default ``glance-manage.conf`` only specifies a custom logging file but other configuration options for ``glance-manage`` should be migrated in there. **Warning**: Options set in ``glance-manage.conf`` will override options of the same section and name set in the other two. Similarly, options in ``glance-api.conf`` will override options set in ``glance-registry.conf``. This tool is planning to stop loading ``glance-registry.conf`` and ``glance-api.conf`` in a future cycle. Configuring Server Startup Options ---------------------------------- You can put the following options in the ``glance-api.conf`` and ``glance-registry.conf`` files, under the ``[DEFAULT]`` section. They enable startup and binding behaviour for the API and registry servers, respectively. * ``bind_host=ADDRESS`` The address of the host to bind to. Optional. Default: ``0.0.0.0`` * ``bind_port=PORT`` The port the server should bind to. Optional. Default: ``9191`` for the registry server, ``9292`` for the API server * ``backlog=REQUESTS`` Number of backlog requests to configure the socket with. Optional. Default: ``4096`` * ``tcp_keepidle=SECONDS`` Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not supported on OS X. Optional. Default: ``600`` * ``client_socket_timeout=SECONDS`` Timeout for client connections' socket operations. If an incoming connection is idle for this period it will be closed. A value of `0` means wait forever. Optional. Default: ``900`` * ``workers=PROCESSES`` Number of Glance API or Registry worker processes to start. Each worker process will listen on the same port. Increasing this value may increase performance (especially if using SSL with compression enabled). Typically it is recommended to have one worker process per CPU. 
The value `0` will prevent any new processes from being created. Optional. Default: The number of CPUs available will be used by default. * ``max_request_id_length=LENGTH`` Limits the maximum size of the x-openstack-request-id header which is logged. Affects only if context middleware is configured in pipeline. Optional. Default: ``64`` (Limited by max_header_line default: 16384) Configuring SSL Support ~~~~~~~~~~~~~~~~~~~~~~~~~ * ``cert_file=PATH`` Path to the certificate file the server should use when binding to an SSL-wrapped socket. Optional. Default: not enabled. * ``key_file=PATH`` Path to the private key file the server should use when binding to an SSL-wrapped socket. Optional. Default: not enabled. * ``ca_file=PATH`` Path to the CA certificate file the server should use to validate client certificates provided during an SSL handshake. This is ignored if ``cert_file`` and ''key_file`` are not set. Optional. Default: not enabled. Configuring Registry Access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are a number of configuration options in Glance that control how the API server accesses the registry server. * ``registry_client_protocol=PROTOCOL`` If you run a secure Registry server, you need to set this value to ``https`` and also set ``registry_client_key_file`` and optionally ``registry_client_cert_file``. Optional. Default: http * ``registry_client_key_file=PATH`` The path to the key file to use in SSL connections to the registry server, if any. Alternately, you may set the ``GLANCE_CLIENT_KEY_FILE`` environ variable to a filepath of the key file Optional. Default: Not set. * ``registry_client_cert_file=PATH`` Optional. Default: Not set. The path to the cert file to use in SSL connections to the registry server, if any. Alternately, you may set the ``GLANCE_CLIENT_CERT_FILE`` environ variable to a filepath of the cert file * ``registry_client_ca_file=PATH`` Optional. Default: Not set. 
The path to a Certifying Authority's cert file to use in SSL connections to the registry server, if any. Alternately, you may set the ``GLANCE_CLIENT_CA_FILE`` environ variable to a filepath of the CA cert file * ``registry_client_insecure=False`` Optional. Default: False. When using SSL in connections to the registry server, do not require validation via a certifying authority. This is the registry's equivalent of specifying --insecure on the command line using glanceclient for the API * ``registry_client_timeout=SECONDS`` Optional. Default: ``600``. The period of time, in seconds, that the API server will wait for a registry request to complete. A value of '0' implies no timeout. .. note:: ``use_user_token``, ``admin_user``, ``admin_password``, ``admin_tenant_name``, ``auth_url``, ``auth_strategy`` and ``auth_region`` options were considered harmful and have been deprecated in M release. They will be removed in O release. For more information read `OSSN-0060 `_. Related functionality with uploading big images has been implemented with Keystone trusts support. * ``use_user_token=True`` Optional. Default: True DEPRECATED. This option will be removed in O release. Pass the user token through for API requests to the registry. If 'use_user_token' is not in effect then admin credentials can be specified (see below). If admin credentials are specified then they are used to generate a token; this token rather than the original user's token is used for requests to the registry. * ``admin_user=USER`` DEPRECATED. This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the username. Optional. Default: None * ``admin_password=PASSWORD`` DEPRECATED. This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the password. Optional. Default: None * ``admin_tenant_name=TENANTNAME`` DEPRECATED. 
This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the tenant name. Optional. Default: None * ``auth_url=URL`` DEPRECATED. This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the Keystone endpoint. Optional. Default: None * ``auth_strategy=STRATEGY`` DEPRECATED. This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the auth strategy. Optional. Default: noauth * ``auth_region=REGION`` DEPRECATED. This option will be removed in O release. If 'use_user_token' is not in effect then admin credentials can be specified. Use this parameter to specify the region. Optional. Default: None Configuring Logging in Glance ----------------------------- There are a number of configuration options in Glance that control how Glance servers log messages. * ``--log-config=PATH`` Optional. Default: ``None`` Specified on the command line only. Takes a path to a configuration file to use for configuring logging. Logging Options Available Only in Configuration Files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You will want to place the different logging options in the **[DEFAULT]** section in your application configuration file. As an example, you might do the following for the API server, in a configuration file called ``etc/glance-api.conf``:: [DEFAULT] log_file = /var/log/glance/api.log * ``log_file`` The filepath of the file to use for logging messages from Glance's servers. If missing, the default is to output messages to ``stdout``, so if you are running Glance servers in a daemon mode (using ``glance-control``) you should make sure that the ``log_file`` option is set appropriately. * ``log_dir`` The filepath of the directory to use for log files. 
If not specified (the default) the ``log_file`` is used as an absolute filepath. * ``log_date_format`` The format string for timestamps in the log output. Defaults to ``%Y-%m-%d %H:%M:%S``. See the `logging module `_ documentation for more information on setting this format string. * ``log_use_syslog`` Use syslog logging functionality. Defaults to False. Configuring Glance Storage Backends ----------------------------------- There are a number of configuration options in Glance that control how Glance stores disk images. These configuration options are specified in the ``glance-api.conf`` config file in the section ``[glance_store]``. * ``default_store=STORE`` Optional. Default: ``file`` Can only be specified in configuration files. Sets the storage backend to use by default when storing images in Glance. Available options for this option are (``file``, ``swift``, ``s3``, ``rbd``, ``sheepdog``, ``cinder`` or ``vsphere``). In order to select a default store it must also be listed in the ``stores`` list described below. * ``stores=STORES`` Optional. Default: ``glance.store.filesystem.Store, glance.store.http.Store`` A comma separated list of enabled glance stores. Options are specified in the format of glance.store.OPTION.Store. Some available options for this option are (``filesystem``, ``http``, ``rbd``, ``s3``, ``swift``, ``sheepdog``, ``cinder``, ``vmware_datastore``) Configuring the Filesystem Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``filesystem_store_datadir=PATH`` Optional. Default: ``/var/lib/glance/images/`` Can only be specified in configuration files. `This option is specific to the filesystem storage backend.` Sets the path where the filesystem storage backend write disk images. Note that the filesystem storage backend will attempt to create this directory if it does not exist. Ensure that the user that ``glance-api`` runs under has write permissions to this directory. * ``filesystem_store_file_perm=PERM_MODE`` Optional. 
Default: ``0`` Can only be specified in configuration files. `This option is specific to the filesystem storage backend.` The required permission value, in octal representation, for the created image file. You can use this value to specify the user of the consuming service (such as Nova) as the only member of the group that owns the created files. To keep the default value, assign a permission value that is less than or equal to 0. Note that the file owner must maintain read permission; if this value removes that permission an error message will be logged and the BadStoreConfiguration exception will be raised. If the Glance service has insufficient privileges to change file access permissions, a file will still be saved, but a warning message will appear in the Glance log. Configuring the Filesystem Storage Backend with multiple stores ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``filesystem_store_datadirs=PATH:PRIORITY`` Optional. Default: ``/var/lib/glance/images/:1`` Example:: filesystem_store_datadirs = /var/glance/store filesystem_store_datadirs = /var/glance/store1:100 filesystem_store_datadirs = /var/glance/store2:200 This option can only be specified in configuration file and is specific to the filesystem storage backend only. filesystem_store_datadirs option allows administrators to configure multiple store directories to save glance image in filesystem storage backend. Each directory can be coupled with its priority. **NOTE**: * This option can be specified multiple times to specify multiple stores. * Either filesystem_store_datadir or filesystem_store_datadirs option must be specified in glance-api.conf * Store with priority 200 has precedence over store with priority 100. * If no priority is specified, default priority '0' is associated with it. * If two filesystem stores have same priority store with maximum free space will be chosen to store the image. 
* If same store is specified multiple times then BadStoreConfiguration exception will be raised. Configuring the Swift Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``swift_store_auth_address=URL`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``auth_address`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Sets the authentication URL supplied to Swift when making calls to its storage system. For more information about the Swift authentication system, please see the `Swift auth `_ documentation and the `overview of Swift authentication `_. **IMPORTANT NOTE**: Swift authentication addresses use HTTPS by default. This means that if you are running Swift with authentication over HTTP, you need to set your ``swift_store_auth_address`` to the full URL, including the ``http://``. * ``swift_store_user=USER`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``user`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Sets the user to authenticate against the ``swift_store_auth_address`` with. * ``swift_store_key=KEY`` Required when using the Swift storage backend. Can only be specified in configuration files. Deprecated. Use ``key`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Sets the authentication key to authenticate against the ``swift_store_auth_address`` with for the user ``swift_store_user``. * ``swift_store_container=CONTAINER`` Optional. Default: ``glance`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Sets the name of the container to use for Glance images in Swift. * ``swift_store_create_container_on_put`` Optional. Default: ``False`` Can only be specified in configuration files. 
`This option is specific to the Swift storage backend.` If true, Glance will attempt to create the container ``swift_store_container`` if it does not exist. * ``swift_store_large_object_size=SIZE_IN_MB`` Optional. Default: ``5120`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` What size, in MB, should Glance start chunking image files and do a large object manifest in Swift? By default, this is the maximum object size in Swift, which is 5GB * ``swift_store_large_object_chunk_size=SIZE_IN_MB`` Optional. Default: ``200`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` When doing a large object manifest, what size, in MB, should Glance write chunks to Swift? The default is 200MB. * ``swift_store_multi_tenant=False`` Optional. Default: ``False`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` If set to True enables multi-tenant storage mode which causes Glance images to be stored in tenant specific Swift accounts. When set to False Glance stores all images in a single Swift account. * ``swift_store_multiple_containers_seed`` Optional. Default: ``0`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` When set to 0, a single-tenant store will only use one container to store all images. When set to an integer value between 1 and 32, a single-tenant store will use multiple containers to store images, and this value will determine how many characters from an image UUID are checked when determining what container to place the image in. The maximum number of containers that will be created is approximately equal to 16^N. This setting is used only when swift_store_multi_tenant is disabled. 
Example: if this config option is set to 3 and swift_store_container = 'glance', then an image with UUID 'fdae39a1-bac5-4238-aba4-69bcc726e848' would be placed in the container 'glance_fda'. All dashes in the UUID are included when creating the container name but do not count toward the character limit, so in this example with N=10 the container name would be 'glance_fdae39a1-ba'. When choosing the value for swift_store_multiple_containers_seed, deployers should discuss a suitable value with their swift operations team. The authors of this option recommend that large scale deployments use a value of '2', which will create a maximum of ~256 containers. Choosing a higher number than this, even in extremely large scale deployments, may not have any positive impact on performance and could lead to a large number of empty, unused containers. The largest of deployments could notice an increase in performance if swift rate limits are throttling on single container. Note: If dynamic container creation is turned off, any value for this configuration option higher than '1' may be unreasonable as the deployer would have to manually create each container. * ``swift_store_admin_tenants`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: Not set. A list of swift ACL strings that will be applied as both read and write ACLs to the containers created by Glance in multi-tenant mode. This grants the specified tenants/users read and write access to all newly created image objects. The standard swift ACL string formats are allowed, including: : : \*: Multiple ACLs can be combined using a comma separated list, for example: swift_store_admin_tenants = service:glance,*:admin * ``swift_store_auth_version`` Can only be specified in configuration files. Deprecated. Use ``auth_version`` in the Swift back-end configuration file instead. `This option is specific to the Swift storage backend.` Optional. 
Default: ``2`` A string indicating which version of Swift OpenStack authentication to use. See the project `python-swiftclient `_ for more details. * ``swift_store_service_type`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: ``object-store`` A string giving the service type of the swift service to use. This setting is only used if swift_store_auth_version is ``2``. * ``swift_store_region`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: Not set. A string giving the region of the swift service endpoint to use. This setting is only used if swift_store_auth_version is ``2``. This setting is especially useful for disambiguation if multiple swift services might appear in a service catalog during authentication. * ``swift_store_endpoint_type`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: ``publicURL`` A string giving the endpoint type of the swift service endpoint to use. This setting is only used if swift_store_auth_version is ``2``. * ``swift_store_ssl_compression`` Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: True. If set to False, disables SSL layer compression of https swift requests. Setting to 'False' may improve performance for images which are already in a compressed format, e.g. qcow2. If set to True then compression will be enabled (provided it is supported by the swift proxy). * ``swift_store_cacert`` Can only be specified in configuration files. Optional. Default: ``None`` A string giving the path to a CA certificate bundle that will allow Glance's services to perform SSL verification when communicating with Swift. * ``swift_store_retry_get_count`` The number of times a Swift download will be retried before the request fails. Optional. 
Default: ``0`` Configuring Multiple Swift Accounts/Stores ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In order to not store Swift account credentials in the database, and to have support for multiple accounts (or multiple Swift backing stores), a reference is stored in the database and the corresponding configuration (credentials/ parameters) details are stored in the configuration file. Optional. Default: not enabled. The location for this file is specified using the ``swift_store_config_file`` config file in the section ``[DEFAULT]``. **If an incorrect value is specified, Glance API Swift store service will not be configured.** * ``swift_store_config_file=PATH`` `This option is specific to the Swift storage backend.` * ``default_swift_reference=DEFAULT_REFERENCE`` Required when multiple Swift accounts/backing stores are configured. Can only be specified in configuration files. `This option is specific to the Swift storage backend.` It is the default swift reference that is used to add any new images. * ``swift_store_auth_insecure`` If True, bypass SSL certificate verification for Swift. Can only be specified in configuration files. `This option is specific to the Swift storage backend.` Optional. Default: ``False`` Configuring Swift configuration file ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If ``swift_store_config_file`` is set, Glance will use information from the file specified under this parameter. .. note:: The ``swift_store_config_file`` is currently used only for single-tenant Swift store configurations. If you configure a multi-tenant Swift store back end (``swift_store_multi_tenant=True``), ensure that both ``swift_store_config_file`` and ``default_swift_reference`` are *not* set. The file contains a set of references like: .. 
code-block:: ini [ref1] user = tenant:user1 key = key1 auth_version = 2 auth_address = http://localhost:5000/v2.0 [ref2] user = project_name:user_name2 key = key2 user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://localhost:5000/v3 A default reference must be configured. Its parameters will be used when creating new images. For example, to specify ``ref2`` as the default reference, add the following value to the [glance_store] section of :file:`glance-api.conf` file: .. code-block:: ini default_swift_reference = ref2 In the reference, a user can specify the following parameters: * ``user`` A *project_name user_name* pair in the ``project_name:user_name`` format to authenticate against the Swift authentication service. * ``key`` An authentication key for a user authenticating against the Swift authentication service. * ``auth_address`` An address where the Swift authentication service is located. * ``auth_version`` A version of the authentication service to use. Valid versions are ``2`` and ``3`` for Keystone and ``1`` (deprecated) for Swauth and Rackspace. Optional. Default: ``2`` * ``project_domain_id`` A domain ID of the project which is the requested project-level authorization scope. Optional. Default: ``None`` `This option can be specified if ``auth_version`` is ``3`` .` * ``project_domain_name`` A domain name of the project which is the requested project-level authorization scope. Optional. Default: ``None`` `This option can be specified if ``auth_version`` is ``3`` .` * ``user_domain_id`` A domain ID of the user which is the requested domain-level authorization scope. Optional. Default: ``None`` `This option can be specified if ``auth_version`` is ``3`` .` * ``user_domain_name`` A domain name of the user which is the requested domain-level authorization scope. Optional. Default: ``None`` `This option can be specified if ``auth_version`` is ``3``. 
` Configuring the S3 Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``s3_store_host=URL`` Required when using the S3 storage backend. Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Default: s3.amazonaws.com Sets the main service URL supplied to S3 when making calls to its storage system. For more information about the S3 authentication system, please see the `S3 documentation `_ * ``s3_store_access_key=ACCESS_KEY`` Required when using the S3 storage backend. Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Sets the access key to authenticate against the ``s3_store_host`` with. You should set this to your 20-character Amazon AWS access key. * ``s3_store_secret_key=SECRET_KEY`` Required when using the S3 storage backend. Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Sets the secret key to authenticate against the ``s3_store_host`` with for the access key ``s3_store_access_key``. You should set this to your 40-character Amazon AWS secret key. * ``s3_store_bucket=BUCKET`` Required when using the S3 storage backend. Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Sets the name of the bucket to use for Glance images in S3. Note that the namespace for S3 buckets is **global**, therefore you must use a name for the bucket that is unique. It is recommended that you use a combination of your AWS access key, **lowercased** with "glance". For instance if your Amazon AWS access key is: ``ABCDEFGHIJKLMNOPQRST`` then make your bucket value be: ``abcdefghijklmnopqrstglance`` * ``s3_store_create_bucket_on_put`` Optional. Default: ``False`` Can only be specified in configuration files. `This option is specific to the S3 storage backend.` If true, Glance will attempt to create the bucket ``s3_store_bucket`` if it does not exist. * ``s3_store_object_buffer_dir=PATH`` Optional. 
Default: ``the platform's default temporary directory`` Can only be specified in configuration files. `This option is specific to the S3 storage backend.` When sending images to S3, what directory should be used to buffer the chunks? By default the platform's temporary directory will be used. * ``s3_store_large_object_size=SIZE_IN_MB`` Optional. Default: ``100`` Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Size, in ``MB``, should S3 start chunking image files and do a multipart upload in S3. * ``s3_store_large_object_chunk_size=SIZE_IN_MB`` Optional. Default: ``10`` Can only be specified in configuration files. `This option is specific to the S3 storage backend.` Multipart upload part size, in ``MB``, should S3 use when uploading parts. The size must be greater than or equal to 5MB. The default is 10MB. * ``s3_store_thread_pools=NUM`` Optional. Default: ``10`` Can only be specified in configuration files. `This option is specific to the S3 storage backend.` The number of thread pools to perform a multipart upload in S3. The default is 10. Configuring the RBD Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Note**: the RBD storage backend requires the python bindings for librados and librbd. These are in the python-ceph package on Debian-based distributions. * ``rbd_store_pool=POOL`` Optional. Default: ``rbd`` Can only be specified in configuration files. `This option is specific to the RBD storage backend.` Sets the RADOS pool in which images are stored. * ``rbd_store_chunk_size=CHUNK_SIZE_MB`` Optional. Default: ``4`` Can only be specified in configuration files. `This option is specific to the RBD storage backend.` Images will be chunked into objects of this size (in megabytes). For best performance, this should be a power of two. * ``rados_connect_timeout`` Optional. Default: ``0`` Can only be specified in configuration files. 
`This option is specific to the RBD storage backend.` Prevents glance-api hangups during the connection to RBD. Sets the time to wait (in seconds) for glance-api before closing the connection. Setting ``rados_connect_timeout<=0`` means no timeout. * ``rbd_store_ceph_conf=PATH`` Optional. Default: ``/etc/ceph/ceph.conf``, ``~/.ceph/config``, and ``./ceph.conf`` Can only be specified in configuration files. `This option is specific to the RBD storage backend.` Sets the Ceph configuration file to use. * ``rbd_store_user=NAME`` Optional. Default: ``admin`` Can only be specified in configuration files. `This option is specific to the RBD storage backend.` Sets the RADOS user to authenticate as. This is only needed when `RADOS authentication `_ is `enabled. `_ A keyring must be set for this user in the Ceph configuration file, e.g. with a user ``glance``:: [client.glance] keyring=/etc/glance/rbd.keyring To set up a user named ``glance`` with minimal permissions, using a pool called ``images``, run:: rados mkpool images ceph-authtool --create-keyring /etc/glance/rbd.keyring ceph-authtool --gen-key --name client.glance --cap mon 'allow r' --cap osd 'allow rwx pool=images' /etc/glance/rbd.keyring ceph auth add client.glance -i /etc/glance/rbd.keyring Configuring the Sheepdog Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``sheepdog_store_address=ADDR`` Optional. Default: ``localhost`` Can only be specified in configuration files. `This option is specific to the Sheepdog storage backend.` Sets the IP address of the sheep daemon * ``sheepdog_store_port=PORT`` Optional. Default: ``7000`` Can only be specified in configuration files. `This option is specific to the Sheepdog storage backend.` Sets the IP port of the sheep daemon * ``sheepdog_store_chunk_size=SIZE_IN_MB`` Optional. Default: ``64`` Can only be specified in configuration files. `This option is specific to the Sheepdog storage backend.` Images will be chunked into objects of this size (in megabytes). 
For best performance, this should be a power of two. Configuring the Cinder Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Note**: Currently Cinder store is experimental. Current deployers should be aware that the use of it in production right now may be risky. It is expected to work well with most iSCSI Cinder backends such as LVM iSCSI, but will not work with some backends especially if they don't support host-attach. **Note**: To create a Cinder volume from an image in this store quickly, additional settings are required. Please see the `Volume-backed image `_ documentation for more information. * ``cinder_catalog_info=::`` Optional. Default: ``volumev2::publicURL`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Sets the info to match when looking for cinder in the service catalog. Format is : separated values of the form: :: * ``cinder_endpoint_template=http://ADDR:PORT/VERSION/%(tenant)s`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Override service catalog lookup with template for cinder endpoint. ``%(...)s`` parts are replaced by the value in the request context. e.g. http://localhost:8776/v2/%(tenant)s * ``os_region_name=REGION_NAME`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Region name of this node. Deprecated. Use ``cinder_os_region_name`` instead. * ``cinder_os_region_name=REGION_NAME`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Region name of this node. If specified, it is used to locate cinder from the service catalog. * ``cinder_ca_certificates_file=CA_FILE_PATH`` Optional. Default: ``None`` Can only be specified in configuration files. 
`This option is specific to the Cinder storage backend.` Location of ca certificates file to use for cinder client requests. * ``cinder_http_retries=TIMES`` Optional. Default: ``3`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Number of cinderclient retries on failed http calls. * ``cinder_state_transition_timeout`` Optional. Default: ``300`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Time period, in seconds, to wait for a cinder volume transition to complete. * ``cinder_api_insecure=ON_OFF`` Optional. Default: ``False`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Allow to perform insecure SSL requests to cinder. * ``cinder_store_user_name=NAME`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` User name to authenticate against Cinder. If , the user of current context is used. **NOTE**: This option is applied only if all of ``cinder_store_user_name``, ``cinder_store_password``, ``cinder_store_project_name`` and ``cinder_store_auth_address`` are set. These options are useful to put image volumes into the internal service project in order to hide the volume from users, and to make the image sharable among projects. * ``cinder_store_password=PASSWORD`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Password for the user authenticating against Cinder. If , the current context auth token is used. * ``cinder_store_project_name=NAME`` Optional. Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Project name where the image is stored in Cinder. If , the project in current context is used. * ``cinder_store_auth_address=URL`` Optional. 
Default: ``None`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` The address where the Cinder authentication service is listening. If , the cinder endpoint in the service catalog is used. * ``rootwrap_config=NAME`` Optional. Default: ``/etc/glance/rootwrap.conf`` Can only be specified in configuration files. `This option is specific to the Cinder storage backend.` Path to the rootwrap configuration file to use for running commands as root. Configuring the VMware Storage Backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``vmware_server_host=ADDRESS`` Required when using the VMware storage backend. Can only be specified in configuration files. Sets the address of the ESX/ESXi or vCenter Server target system. The address can contain an IP (``127.0.0.1``), an IP and port (``127.0.0.1:443``), a DNS name (``www.my-domain.com``) or DNS and port. `This option is specific to the VMware storage backend.` * ``vmware_server_username=USERNAME`` Required when using the VMware storage backend. Can only be specified in configuration files. Username for authenticating with VMware ESX/ESXi or vCenter Server. * ``vmware_server_password=PASSWORD`` Required when using the VMware storage backend. Can only be specified in configuration files. Password for authenticating with VMware ESX/ESXi or vCenter Server. * ``vmware_datacenter_path=DC_PATH`` Optional. Default: ``ha-datacenter`` Can only be specified in configuration files. Inventory path to a datacenter. If the ``vmware_server_host`` specified is an ESX/ESXi, the ``vmware_datacenter_path`` is optional. If specified, it should be ``ha-datacenter``. * ``vmware_datastore_name=DS_NAME`` Required when using the VMware storage backend. Can only be specified in configuration files. Datastore name associated with the ``vmware_datacenter_path`` * ``vmware_datastores`` Optional. Default: Not set. 
This option can only be specified in configuration file and is specific to the VMware storage backend. vmware_datastores allows administrators to configure multiple datastores to save glance image in the VMware store backend. The required format for the option is: ::. where datacenter_path is the inventory path to the datacenter where the datastore is located. An optional weight can be given to specify the priority. Example:: vmware_datastores = datacenter1:datastore1 vmware_datastores = dc_folder/datacenter2:datastore2:100 vmware_datastores = datacenter1:datastore3:200 **NOTE**: - This option can be specified multiple times to specify multiple datastores. - Either vmware_datastore_name or vmware_datastores option must be specified in glance-api.conf - Datastore with weight 200 has precedence over datastore with weight 100. - If no weight is specified, default weight '0' is associated with it. - If two datastores have same weight, the datastore with maximum free space will be chosen to store the image. - If the datacenter path or datastore name contains a colon (:) symbol, it must be escaped with a backslash. * ``vmware_api_retry_count=TIMES`` Optional. Default: ``10`` Can only be specified in configuration files. The number of times VMware ESX/VC server API must be retried upon connection related issues. * ``vmware_task_poll_interval=SECONDS`` Optional. Default: ``5`` Can only be specified in configuration files. The interval used for polling remote tasks invoked on VMware ESX/VC server. * ``vmware_store_image_dir`` Optional. Default: ``/openstack_glance`` Can only be specified in configuration files. The path to access the folder where the images will be stored in the datastore. * ``vmware_api_insecure=ON_OFF`` Optional. Default: ``False`` Can only be specified in configuration files. Allow to perform insecure SSL requests to ESX/VC server. Configuring the Storage Endpoint ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ``swift_store_endpoint=URL`` Optional. 
Default: ``None`` Can only be specified in configuration files. Overrides the storage URL returned by auth. The URL should include the path up to and excluding the container. The location of an object is obtained by appending the container and object to the configured URL. e.g. ``https://www.my-domain.com/v1/path_up_to_container`` Configuring Glance Image Size Limit ----------------------------------- The following configuration option is specified in the ``glance-api.conf`` config file in the section ``[DEFAULT]``. * ``image_size_cap=SIZE`` Optional. Default: ``1099511627776`` (1 TB) Maximum image size, in bytes, which can be uploaded through the Glance API server. **IMPORTANT NOTE**: this value should only be increased after careful consideration and must be set to a value under 8 EB (9223372036854775808). Configuring Glance User Storage Quota ------------------------------------- The following configuration option is specified in the ``glance-api.conf`` config file in the section ``[DEFAULT]``. * ``user_storage_quota`` Optional. Default: 0 (Unlimited). This value specifies the maximum amount of storage that each user can use across all storage systems. Optionally unit can be specified for the value. Values are accepted in B, KB, MB, GB or TB which are for Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. Default unit is Bytes. Example values would be, user_storage_quota=20GB Configuring the Image Cache --------------------------- Glance API servers can be configured to have a local image cache. Caching of image files is transparent and happens using a piece of middleware that can optionally be placed in the server application pipeline. This pipeline is configured in the PasteDeploy configuration file, -paste.ini. You should not generally have to edit this file directly, as it ships with ready-made pipelines for all common deployment flavors. 
Enabling the Image Cache Middleware ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the image cache middleware, the cache middleware must occur in the application pipeline **after** the appropriate context middleware. The cache middleware should be in your ``glance-api-paste.ini`` in a section titled ``[filter:cache]``. It should look like this:: [filter:cache] paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory A ready-made application pipeline including this filter is defined in the ``glance-api-paste.ini`` file, looking like so:: [pipeline:glance-api-caching] pipeline = versionnegotiation context cache apiv1app To enable the above application pipeline, in your main ``glance-api.conf`` configuration file, select the appropriate deployment flavor like so:: [paste_deploy] flavor = caching Enabling the Image Cache Management Middleware ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is an optional ``cachemanage`` middleware that allows you to directly interact with cached images. Use this flavor in place of the ``cache`` flavor in your api config file. There are three types you can choose: ``cachemanagement``, ``keystone+cachemanagement`` and ``trusted-auth+cachemanagement``.:: [paste_deploy] flavor = keystone+cachemanagement Configuration Options Affecting the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: These configuration options must be set in both the glance-cache and glance-api configuration files. One main configuration file option affects the image cache. * ``image_cache_dir=PATH`` Required when image cache middleware is enabled. Default: ``/var/lib/glance/image-cache`` This is the base directory the image cache can write files to. Make sure the directory is writable by the user running the ``glance-api`` server. * ``image_cache_driver=DRIVER`` Optional. 
Choice of ``sqlite`` or ``xattr`` Default: ``sqlite`` The default ``sqlite`` cache driver has no special dependencies, other than the ``python-sqlite3`` library, which is installed on virtually all operating systems with modern versions of Python. It stores information about the cached files in a SQLite database. The ``xattr`` cache driver required the ``python-xattr>=0.6.0`` library and requires that the filesystem containing ``image_cache_dir`` have access times tracked for all files (in other words, the noatime option CANNOT be set for that filesystem). In addition, ``user_xattr`` must be set on the filesystem's description line in fstab. Because of these requirements, the ``xattr`` cache driver is not available on Windows. * ``image_cache_sqlite_db=DB_FILE`` Optional. Default: ``cache.db`` When using the ``sqlite`` cache driver, you can set the name of the database that will be used to store the cached images information. The database is always contained in the ``image_cache_dir``. * ``image_cache_max_size=SIZE`` Optional. Default: ``10737418240`` (10 GB) Size, in bytes, that the image cache should be constrained to. Images files are cached automatically in the local image cache, even if the writing of that image file would put the total cache size over this size. The ``glance-cache-pruner`` executable is what prunes the image cache to be equal to or less than this value. The ``glance-cache-pruner`` executable is designed to be run via cron on a regular basis. See more about this executable in :doc:`Controlling the Growth of the Image Cache ` .. _configuring-the-glance-registry: Configuring the Glance Registry ------------------------------- There are a number of configuration options in Glance that control how this registry server operates. These configuration options are specified in the ``glance-registry.conf`` config file in the section ``[DEFAULT]``. 
**IMPORTANT NOTE**: The glance-registry service is only used in conjunction with the glance-api service when clients are using the v1 REST API. See `Configuring Glance APIs`_ for more info. * ``sql_connection=CONNECTION_STRING`` (``--sql-connection`` when specified on command line) Optional. Default: ``None`` Can be specified in configuration files. Can also be specified on the command-line for the ``glance-manage`` program. Sets the SQLAlchemy connection string to use when connecting to the registry database. Please see the documentation for `SQLAlchemy connection strings `_ online. You must urlencode any special characters in CONNECTION_STRING. * ``sql_timeout=SECONDS`` (``--sql-timeout`` when specified on command line) Optional. Default: ``3600`` Can only be specified in configuration files. Sets the number of seconds after which SQLAlchemy should reconnect to the datastore if no activity has been made on the connection. * ``enable_v1_registry=`` Optional. Default: ``True`` * ``enable_v2_registry=`` Optional. Default: ``True`` Defines which version(s) of the Registry API will be enabled. If the Glance API server parameter ``enable_v1_api`` has been set to ``True`` the ``enable_v1_registry`` has to be ``True`` as well. If the Glance API server parameter ``enable_v2_api`` has been set to ``True`` and the parameter ``data_api`` has been set to ``glance.db.registry.api`` the ``enable_v2_registry`` has to be set to ``True`` Configuring Notifications ------------------------- Glance can optionally generate notifications to be logged or sent to a message queue. The configuration options are specified in the ``glance-api.conf`` config file in the section ``[DEFAULT]``. * ``notification_driver`` Optional. Default: ``noop`` Sets the notification driver used by oslo.messaging. Options include ``messaging``, ``messagingv2``, ``log`` and ``routing``. For more information see :doc:`Glance notifications ` and `oslo.messaging `_. * ``disabled_notifications`` Optional. 
Default: ``[]`` List of disabled notifications. A notification can be given either as a notification type to disable a single event, or as a notification group prefix to disable all events within a group. Example: if this config option is set to ["image.create", "metadef_namespace"], then "image.create" notification will not be sent after image is created and none of the notifications for metadefinition namespaces will be sent. Configuring Glance Property Protections --------------------------------------- Access to image meta properties may be configured using a :doc:`Property Protections Configuration file `. The location for this file can be specified in the ``glance-api.conf`` config file in the section ``[DEFAULT]``. **If an incorrect value is specified, glance api service will not start.** * ``property_protection_file=PATH`` Optional. Default: not enabled. If property_protection_file is set, the file may use either roles or policies to specify property protections. * ``property_protection_rule_format=`` Optional. Default: ``roles``. Configuring Glance APIs ----------------------- The glance-api service implements versions 1 and 2 of the OpenStack Images API. Disable any version of the Images API using the following options: * ``enable_v1_api=`` Optional. Default: ``True`` * ``enable_v2_api=`` Optional. Default: ``True`` **IMPORTANT NOTE**: To use v2 registry in v2 API, you must set ``data_api`` to glance.db.registry.api in glance-api.conf. Configuring Glance Tasks ------------------------ Glance Tasks are implemented only for version 2 of the OpenStack Images API. The config value ``task_time_to_live`` is used to determine how long a task would be visible to the user after transitioning to either the ``success`` or the ``failure`` state. * ``task_time_to_live=`` Optional. Default: ``48`` The config value ``task_executor`` is used to determine which executor should be used by the Glance service to process the task. 
The currently available implementation is: ``taskflow``. * ``task_executor=`` Optional. Default: ``taskflow`` The ``taskflow`` engine has its own set of configuration options, under the ``taskflow_executor`` section, that can be tuned to improve the task execution process. Among the available options, you may find ``engine_mode`` and ``max_workers``. The former allows for selecting an execution model and the available options are ``serial``, ``parallel`` and ``worker-based``. The ``max_workers`` option, instead, allows for controlling the number of workers that will be instantiated per executor instance. The default value for the ``engine_mode`` is ``parallel``, whereas the default number of ``max_workers`` is ``10``. Configuring Glance performance profiling ---------------------------------------- Glance supports using osprofiler to trace the performance of each key internal handling, including RESTful API calling, DB operation and etc. ``Please be aware that Glance performance profiling is currently a work in progress feature.`` Although, some trace points is available, e.g. API execution profiling at wsgi main entry and SQL execution profiling at DB module, the more fine-grained trace point is being worked on. The config value ``enabled`` is used to determine whether fully enable profiling feature for glance-api and glance-registry service. * ``enabled=`` Optional. Default: ``False`` There is one more configuration option that needs to be defined to enable Glance services profiling. The config value ``hmac_keys`` is used for encrypting context data for performance profiling. * ``hmac_keys=`` Optional. Default: ``SECRET_KEY`` **IMPORTANT NOTE**: in order to make profiling work as designed operator needs to make those values of HMAC key be consistent for all services in their deployment. Without HMAC key the profiling will not be triggered even profiling feature is enabled. 
**IMPORTANT NOTE**: previously HMAC keys (as well as enabled parameter) were placed at /etc/glance/api-paste.ini and /etc/glance/registry-paste.ini files for Glance API and Glance Registry services respectively. Starting with osprofiler 0.3.1 release there is no need to set these arguments in the *-paste.ini files. This functionality is still supported, although the config file values take higher priority. The config value ``trace_sqlalchemy`` is used to determine whether to fully enable sqlalchemy engine based SQL execution profiling feature for glance-api and glance-registry services. * ``trace_sqlalchemy=`` Optional. Default: ``False`` Configuring Glance public endpoint ---------------------------------- This setting allows an operator to configure the endpoint URL that will appear in the Glance "versions" response (that is, the response to ``GET /``\ ). This can be necessary when the Glance API service is run behind a proxy because the default endpoint displayed in the versions response is that of the host actually running the API service. If Glance is being run behind a load balancer, for example, direct access to individual hosts running the Glance API may not be allowed, hence the load balancer URL would be used for this value. * ``public_endpoint=`` Optional. Default: ``None`` Configuring Glance digest algorithm ----------------------------------- Digest algorithm that will be used for digital signature. The default is sha256. Use the command:: openssl list-message-digest-algorithms to get the available algorithms supported by the version of OpenSSL on the platform. Examples are "sha1", "sha256", "sha512", etc. If an invalid digest algorithm is configured, all digital signature operations will fail and return a ValueError exception with "No such digest method" error. * ``digest_algorithm=`` Optional. 
Default: ``sha256`` Configuring http_keepalive option --------------------------------- * ``http_keepalive=`` If False, server will return the header "Connection: close", If True, server will return "Connection: Keep-Alive" in its responses. In order to close the client socket connection explicitly after the response is sent and read successfully by the client, you simply have to set this option to False when you create a wsgi server. Configuring the Health Check ---------------------------- This setting allows an operator to configure the endpoint URL that will provide information to load balancer if given API endpoint at the node should be available or not. Both Glance API and Glance Registry servers can be configured to expose a health check URL. To enable the health check middleware, it must occur in the beginning of the application pipeline. The health check middleware should be placed in your ``glance-api-paste.ini`` / ``glance-registry-paste.ini`` in a section titled ``[filter:healthcheck]``. It should look like this:: [filter:healthcheck] paste.filter_factory = oslo_middleware:Healthcheck.factory backends = disable_by_file disable_by_file_path = /etc/glance/healthcheck_disable A ready-made application pipeline including this filter is defined e.g. in the ``glance-api-paste.ini`` file, looking like so:: [pipeline:glance-api] pipeline = healthcheck versionnegotiation osprofiler unauthenticated-context rootapp For more information see `oslo.middleware `_. Configuring supported disk formats ---------------------------------- Each image in Glance has an associated disk format property. When creating an image the user specifies a disk format. They must select a format from the set that the Glance service supports. This supported set can be seen by querying the ``/v2/schemas/images`` resource. An operator can add or remove disk formats to the supported set. 
This is done by setting the ``disk_formats`` parameter which is found in the ``[image_formats]`` section of ``glance-api.conf``. * ``disk_formats=`` Optional. Default: ``ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso`` glance-12.0.0/doc/source/flows.rst0000664000567000056710000000147712701407047020130 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Glance Flow Plugins =================== Flows ----- .. list-plugins:: glance.flows :detailed: Import Flows ------------ .. list-plugins:: glance.flows.import :detailed: glance-12.0.0/doc/source/glanceapi.rst0000664000567000056710000006714112701407051020714 0ustar jenkinsjenkins00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Using Glance's Image Public APIs ================================ Glance fully implements versions 1.0, 1.1 and 2.0 of the OpenStack Images API. 
The Images API specification is developed alongside Glance, but is not considered part of the Glance project. Authentication -------------- Glance depends on Keystone and the OpenStack Identity API to handle authentication of clients. You must obtain an authentication token from Keystone and send it along with all API requests to Glance through the ``X-Auth-Token`` header. Glance will communicate back to Keystone to verify the token validity and obtain your identity credentials. See :doc:`authentication` for more information on integrating with Keystone. Using v1.X ---------- For the purpose of examples, assume there is a Glance API server running at the URL ``http://glance.example.com`` on the default port 80. List Available Images ********************* We want to see a list of available images that the authenticated user has access to. This includes images owned by the user, images shared with the user and public images. We issue a ``GET`` request to ``http://glance.example.com/v1/images`` to retrieve this list of available images. The data is returned as a JSON-encoded mapping in the following format:: {'images': [ {'uri': 'http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', 'name': 'Ubuntu 10.04 Plain', 'disk_format': 'vhd', 'container_format': 'ovf', 'size': '5368709120'} ...]} List Available Images in More Detail ************************************ We want to see a more detailed list of available images that the authenticated user has access to. This includes images owned by the user, images shared with the user and public images. We issue a ``GET`` request to ``http://glance.example.com/v1/images/detail`` to retrieve this list of available images. 
The data is returned as a JSON-encoded mapping in the following format:: {'images': [ {'uri': 'http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', 'name': 'Ubuntu 10.04 Plain 5GB', 'disk_format': 'vhd', 'container_format': 'ovf', 'size': '5368709120', 'checksum': 'c2e5db72bd7fd153f53ede5da5a06de3', 'created_at': '2010-02-03 09:34:01', 'updated_at': '2010-02-03 09:34:01', 'deleted_at': '', 'status': 'active', 'is_public': true, 'min_ram': 256, 'min_disk': 5, 'owner': null, 'properties': {'distro': 'Ubuntu 10.04 LTS'}}, ...]} .. note:: All timestamps returned are in UTC. The `updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. The `properties` field is a mapping of free-form key/value pairs that have been saved with the image metadata. The `checksum` field is an MD5 checksum of the image file data. The `is_public` field is a boolean indicating whether the image is publicly available. The `min_ram` field is an integer specifying the minimum amount of RAM needed to run this image on an instance, in megabytes. The `min_disk` field is an integer specifying the minimum amount of disk space needed to run this image on an instance, in gigabytes. The `owner` field is a string which may either be null or which will indicate the owner of the image. Filtering Images Lists ********************** Both the ``GET /v1/images`` and ``GET /v1/images/detail`` requests take query parameters that serve to filter the returned list of images. The following list details these query parameters. * ``name=NAME`` Filters images having a ``name`` attribute matching ``NAME``. 
* ``container_format=FORMAT`` Filters images having a ``container_format`` attribute matching ``FORMAT`` For more information, see :doc:`About Disk and Container Formats ` * ``disk_format=FORMAT`` Filters images having a ``disk_format`` attribute matching ``FORMAT`` For more information, see :doc:`About Disk and Container Formats ` * ``status=STATUS`` Filters images having a ``status`` attribute matching ``STATUS`` For more information, see :doc:`About Image Statuses ` * ``size_min=BYTES`` Filters images having a ``size`` attribute greater than or equal to ``BYTES`` * ``size_max=BYTES`` Filters images having a ``size`` attribute less than or equal to ``BYTES`` These two resources also accept additional query parameters: * ``sort_key=KEY`` Results will be ordered by the specified image attribute ``KEY``. Accepted values include ``id``, ``name``, ``status``, ``disk_format``, ``container_format``, ``size``, ``created_at`` (default) and ``updated_at``. * ``sort_dir=DIR`` Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` for ascending or ``desc`` (default) for descending. * ``marker=ID`` An image identifier marker may be specified. When present, only images which occur after the identifier ``ID`` will be listed. (These are the images that have a `sort_key` later than that of the marker ``ID`` in the `sort_dir` direction.) * ``limit=LIMIT`` When present, the maximum number of results returned will not exceed ``LIMIT``. .. note:: If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) then the number of results returned may be less than ``LIMIT``. * ``is_public=PUBLIC`` An admin user may use the `is_public` parameter to control which results are returned. When the `is_public` parameter is absent or set to `True` the following images will be listed: Images whose `is_public` field is `True`, owned images and shared images. 
When the `is_public` parameter is set to `False` the following images will be listed: Images (owned, shared, or non-owned) whose `is_public` field is `False`. When the `is_public` parameter is set to `None` all images will be listed irrespective of owner, shared status or the `is_public` field. .. note:: Use of the `is_public` parameter is restricted to admin users. For all other users it will be ignored. Retrieve Image Metadata *********************** We want to see detailed information for a specific virtual machine image that the Glance server knows about. We have queried the Glance server for a list of images and the data returned includes the `uri` field for each available image. This `uri` field value contains the exact location needed to get the metadata for a specific image. Continuing the example from above, in order to get metadata about the first image returned, we can issue a ``HEAD`` request to the Glance server for the image's URI. We issue a ``HEAD`` request to ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to retrieve complete metadata for that image. The metadata is returned as a set of HTTP headers that begin with the prefix ``x-image-meta-``. The following shows an example of the HTTP headers returned from the above ``HEAD`` request:: x-image-meta-uri http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 x-image-meta-name Ubuntu 10.04 Plain 5GB x-image-meta-disk_format vhd x-image-meta-container_format ovf x-image-meta-size 5368709120 x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 x-image-meta-created_at 2010-02-03 09:34:01 x-image-meta-updated_at 2010-02-03 09:34:01 x-image-meta-deleted_at x-image-meta-status available x-image-meta-is_public true x-image-meta-min_ram 256 x-image-meta-min_disk 0 x-image-meta-owner null x-image-meta-property-distro Ubuntu 10.04 LTS .. note:: All timestamps returned are in UTC. 
The `x-image-meta-updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. There may be multiple headers that begin with the prefix `x-image-meta-property-`. These headers are free-form key/value pairs that have been saved with the image metadata. The key is the string after `x-image-meta-property-` and the value is the value of the header. The response's `ETag` header will always be equal to the `x-image-meta-checksum` value. The response's `x-image-meta-is_public` value is a boolean indicating whether the image is publicly available. The response's `x-image-meta-owner` value is a string which may either be null or which will indicate the owner of the image. Retrieve Raw Image Data *********************** We want to retrieve that actual raw data for a specific virtual machine image that the Glance server knows about. We have queried the Glance server for a list of images and the data returned includes the `uri` field for each available image. This `uri` field value contains the exact location needed to get the metadata for a specific image. Continuing the example from above, in order to get metadata about the first image returned, we can issue a ``HEAD`` request to the Glance server for the image's URI. We issue a ``GET`` request to ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to retrieve metadata for that image as well as the image itself encoded into the response body. The metadata is returned as a set of HTTP headers that begin with the prefix ``x-image-meta-``. 
The following shows an example of the HTTP headers returned from the above ``GET`` request:: x-image-meta-uri http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 x-image-meta-name Ubuntu 10.04 Plain 5GB x-image-meta-disk_format vhd x-image-meta-container_format ovf x-image-meta-size 5368709120 x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 x-image-meta-created_at 2010-02-03 09:34:01 x-image-meta-updated_at 2010-02-03 09:34:01 x-image-meta-deleted_at x-image-meta-status available x-image-meta-is_public true x-image-meta-min_ram 256 x-image-meta-min_disk 5 x-image-meta-owner null x-image-meta-property-distro Ubuntu 10.04 LTS .. note:: All timestamps returned are in UTC. The `x-image-meta-updated_at` timestamp is the timestamp when an image's metadata was last updated, not its image data, as all image data is immutable once stored in Glance. There may be multiple headers that begin with the prefix `x-image-meta-property-`. These headers are free-form key/value pairs that have been saved with the image metadata. The key is the string after `x-image-meta-property-` and the value is the value of the header. The response's `Content-Length` header shall be equal to the value of the `x-image-meta-size` header. The response's `ETag` header will always be equal to the `x-image-meta-checksum` value. The response's `x-image-meta-is_public` value is a boolean indicating whether the image is publicly available. The response's `x-image-meta-owner` value is a string which may either be null or which will indicate the owner of the image. The image data itself will be the body of the HTTP response returned from the request, which will have content-type of `application/octet-stream`. 
Add a New Image *************** We have created a new virtual machine image in some way (created a "golden image" or snapshotted/backed up an existing image) and we wish to do two things: * Store the disk image data in Glance * Store metadata about this image in Glance We can do the above two activities in a single call to the Glance API. Assuming, like in the examples above, that a Glance API server is running at ``glance.example.com``, we issue a ``POST`` request to add an image to Glance:: POST http://glance.example.com/v1/images The metadata about the image is sent to Glance in HTTP headers. The body of the HTTP request to the Glance API will be the MIME-encoded disk image data. Reserve a New Image ******************* We can also perform the activities described in `Add a New Image`_ using two separate calls to the Image API; the first to register the image metadata, and the second to add the image disk data. This is known as "reserving" an image. The first call should be a ``POST`` to ``http://glance.example.com/v1/images``, which will result in a new image id being registered with a status of ``queued``:: {"image": {"status": "queued", "id": "71c675ab-d94f-49cd-a114-e12490b328d9", ...} ...} The image data can then be added using a ``PUT`` to ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9``. The image status will then be set to ``active`` by Glance. **Image Metadata in HTTP Headers** Glance will view as image metadata any HTTP header that it receives in a ``POST`` request where the header key is prefixed with the strings ``x-image-meta-`` and ``x-image-meta-property-``. The list of metadata headers that Glance accepts are listed below. * ``x-image-meta-name`` This header is required, unless reserving an image. Its value should be the name of the image. Note that the name of an image *is not unique to a Glance node*. It would be an unrealistic expectation of users to know all the unique names of all other user's images. 
* ``x-image-meta-id`` This header is optional. When present, Glance will use the supplied identifier for the image. If the identifier already exists in that Glance node, then a **409 Conflict** will be returned by Glance. The value of the header must be a uuid in hexadecimal string notation (that is 71c675ab-d94f-49cd-a114-e12490b328d9). When this header is *not* present, Glance will generate an identifier for the image and return this identifier in the response (see below). * ``x-image-meta-store`` This header is optional. Valid values are one of ``file``, ``s3``, ``rbd``, ``swift``, ``cinder``, ``sheepdog`` or ``vsphere``. When present, Glance will attempt to store the disk image data in the backing store indicated by the value of the header. If the Glance node does not support the backing store, Glance will return a **400 Bad Request**. When not present, Glance will store the disk image data in the backing store that is marked as default. See the configuration option ``default_store`` for more information. * ``x-image-meta-disk_format`` This header is required, unless reserving an image. Valid values are one of ``aki``, ``ari``, ``ami``, ``raw``, ``iso``, ``vhd``, ``vdi``, ``qcow2``, or ``vmdk``. For more information, see :doc:`About Disk and Container Formats `. * ``x-image-meta-container_format`` This header is required, unless reserving an image. Valid values are one of ``aki``, ``ari``, ``ami``, ``bare``, ``ovf``, or ``docker``. For more information, see :doc:`About Disk and Container Formats `. * ``x-image-meta-size`` This header is optional. When present, Glance assumes that the expected size of the request body will be the value of this header. If the length in bytes of the request body *does not match* the value of this header, Glance will return a **400 Bad Request**. When not present, Glance will calculate the image's size based on the size of the request body. * ``x-image-meta-checksum`` This header is optional. 
When present, it specifies the **MD5** checksum of the image file data. When present, Glance will verify the checksum generated from the back-end store while storing your image against this value and return a **400 Bad Request** if the values do not match. * ``x-image-meta-is_public`` This header is optional. When Glance finds the string "true" (case-insensitive), the image is marked as a public one, meaning that any user may view its metadata and may read the disk image from Glance. When not present, the image is assumed to be *not public* and owned by a user. * ``x-image-meta-min_ram`` This header is optional. When present, it specifies the minimum amount of RAM in megabytes required to run this image on a server. When not present, the image is assumed to have a minimum RAM requirement of 0. * ``x-image-meta-min_disk`` This header is optional. When present, it specifies the expected minimum disk space in gigabytes required to run this image on a server. When not present, the image is assumed to have a minimum disk space requirement of 0. * ``x-image-meta-owner`` This header is optional and only meaningful for admins. Glance normally sets the owner of an image to be the tenant or user (depending on the "owner_is_tenant" configuration option) of the authenticated user issuing the request. However, if the authenticated user has the Admin role, this default may be overridden by setting this header to null or to a string identifying the owner of the image. * ``x-image-meta-property-*`` When Glance receives any HTTP header whose key begins with the string prefix ``x-image-meta-property-``, Glance adds the key and value to a set of custom, free-form image properties stored with the image. The key is a lower-cased string following the prefix ``x-image-meta-property-`` with dashes and punctuation replaced with underscores. 
For example, if the following HTTP header were sent:: x-image-meta-property-distro Ubuntu 10.10 then a key/value pair of "distro"/"Ubuntu 10.10" will be stored with the image in Glance. There is no limit on the number of free-form key/value attributes that can be attached to the image. However, keep in mind that the 8K limit on the size of all the HTTP headers sent in a request will effectively limit the number of image properties. Update an Image *************** Glance will consider any HTTP header that it receives in a ``PUT`` request as an instance of image metadata. In this case, the header key should be prefixed with the strings ``x-image-meta-`` and ``x-image-meta-property-``. If an image was previously reserved, and thus is in the ``queued`` state, then image data can be added by including it as the request body. If the image already has data associated with it (for example, it is not in the ``queued`` state), then including a request body will result in a **409 Conflict** exception. On success, the ``PUT`` request will return the image metadata encoded as HTTP headers. See more about image statuses here: :doc:`Image Statuses ` List Image Memberships ********************** We want to see a list of the other system tenants (or users, if "owner_is_tenant" is False) that may access a given virtual machine image that the Glance server knows about. We take the `uri` field of the image data, append ``/members`` to it, and issue a ``GET`` request on the resulting URL. Continuing from the example above, in order to get the memberships for the first image returned, we can issue a ``GET`` request to the Glance server for ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members``. And we will get back JSON data such as the following:: {'members': [ {'member_id': 'tenant1', 'can_share': false} ...]} The `member_id` field identifies a tenant with which the image is shared. 
If that tenant is authorized to further share the image, the `can_share` field is `true`. List Shared Images ****************** We want to see a list of images which are shared with a given tenant. We issue a ``GET`` request to ``http://glance.example.com/v1/shared-images/tenant1``. We will get back JSON data such as the following:: {'shared_images': [ {'image_id': '71c675ab-d94f-49cd-a114-e12490b328d9', 'can_share': false} ...]} The `image_id` field identifies an image shared with the tenant named by *member_id*. If the tenant is authorized to further share the image, the `can_share` field is `true`. Add a Member to an Image ************************ We want to authorize a tenant to access a private image. We issue a ``PUT`` request to ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members/tenant1``. With no body, this will add the membership to the image, leaving existing memberships unmodified and defaulting new memberships to have `can_share` set to `false`. We may also optionally attach a body of the following form:: {'member': {'can_share': true} } If such a body is provided, both existing and new memberships will have `can_share` set to the provided value (either `true` or `false`). This query will return a 204 ("No Content") status code. Remove a Member from an Image ***************************** We want to revoke a tenant's right to access a private image. We issue a ``DELETE`` request to ``http://glance.example.com/v1/images/1/members/tenant1``. This query will return a 204 ("No Content") status code. Replace a Membership List for an Image ************************************** The full membership list for a given image may be replaced. 
We issue a ``PUT`` request to ``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members`` with a body of the following form:: {'memberships': [ {'member_id': 'tenant1', 'can_share': false} ...]} All existing memberships which are not named in the replacement body are removed, and those which are named have their `can_share` settings changed as specified. (The `can_share` setting may be omitted, which will cause that setting to remain unchanged in the existing memberships.) All new memberships will be created, with `can_share` defaulting to `false` unless it is specified otherwise. Image Membership Changes in Version 2.0 --------------------------------------- Version 2.0 of the Images API eliminates the ``can_share`` attribute of image membership. In the version 2.0 model, image sharing is not transitive. In version 2.0, image members have a ``status`` attribute that reflects how the image should be treated with respect to that image member's image-list. * The ``status`` attribute may have one of three values: ``pending``, ``accepted``, or ``rejected``. * By default, only those shared images with status ``accepted`` are included in an image member's image-list. * Only an image member may change his/her own membership status. * Only an image owner may create members on an image. The status of a newly created image member is ``pending``. The image owner cannot change the status of a member. Distinctions from Version 1.x API Calls *************************************** * The response to a request to list the members of an image has changed. call: ``GET`` on ``/v2/images/{imageId}/members`` response: see the JSON schema at ``/v2/schemas/members`` * The request body in the call to create an image member has changed. call: ``POST`` to ``/v2/images/{imageId}/members`` request body:: { "member": "" } where the {memberId} is the tenant ID of the image member. The member status of a newly created image member is ``pending``. 
New API Calls ************* * Change the status of an image member call: ``PUT`` on ``/v2/images/{imageId}/members/{memberId}`` request body:: { "status": "" } where is ``pending``, ``accepted``, or ``rejected``. The {memberId} is the tenant ID of the image member. Images v2 Tasks API ------------------- Version 2 of the OpenStack Images API introduces a Task resource that is used to create and monitor long-running asynchronous image-related processes. See the :doc:`Tasks ` section of the Glance documentation for more information. The following Task calls are available: Create a Task ************* A user wants to initiate a task. The user issues a ``POST`` request to ``/v2/tasks``. The request body is of Content-type ``application/json`` and must contain the following fields: * ``type``: a string specified by the enumeration defined in the Task schema * ``input``: a JSON object. The content is defined by the cloud provider who has exposed the endpoint being contacted The response is a Task entity as defined by the Task schema. It includes an ``id`` field that can be used in a subsequent call to poll the task for status changes. A task is created in ``pending`` status. Show a Task *********** A user wants to see detailed information about a task the user owns. The user issues a ``GET`` request to ``/v2/tasks/{taskId}``. The response is in ``application/json`` format. The exact structure is given by the task schema located at ``/v2/schemas/task``. List Tasks ********** A user wants to see what tasks have been created in his or her project. The user issues a ``GET`` request to ``/v2/tasks``. The response is in ``application/json`` format. The exact structure is given by the task schema located at ``/v2/schemas/tasks``. Note that, as indicated by the schema, the list of tasks is provided in a sparse format. To see more information about a particular task in the list, the user would use the show task call described above. 
Filtering and Sorting the Tasks List ************************************ The ``GET /v2/tasks`` request takes query parameters that serve to filter the returned list of tasks. The following list details these query parameters. * ``status={status}`` Filters the list to display only those tasks in the specified status. See the task schema or the :doc:`Task Statuses ` section of this documentation for the legal values to use for ``{status}``. For example, a request to ``GET /v2/tasks?status=pending`` would return only those tasks whose current status is ``pending``. * ``type={type}`` Filters the list to display only those tasks of the specified type. See the enumeration defined in the task schema for the legal values to use for ``{type}``. For example, a request to ``GET /v2/tasks?type=import`` would return only import tasks. * ``sort_dir={direction}`` Sorts the list of tasks according to ``updated_at`` datetime. Legal values are ``asc`` (ascending) and ``desc`` (descending). By default, the task list is sorted by ``created_at`` time in descending chronological order. API Message Localization ------------------------ Glance supports HTTP message localization. For example, an HTTP client can receive API messages in Chinese even if the locale language of the server is English. How to use it ************* To receive localized API messages, the HTTP client needs to specify the **Accept-Language** header to indicate the language into which the message should be translated. For more information about Accept-Language, please refer to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html A typical curl API request will be like below:: curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' http://127.0.0.1:9292/v2/images/aaa Then the response will be like the following:: HTTP/1.1 404 Not Found Content-Length: 234 Content-Type: text/html; charset=UTF-8 X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a Date: Sat, 22 Feb 2014 06:26:26 GMT 404 Not Found

404 Not Found

找不到任何具有标识 aaa 的映像

.. note:: Make sure to have a language package under /usr/share/locale-langpack/ on the target Glance server. glance-12.0.0/doc/source/notifications.rst0000664000567000056710000001234312701407047021641 0ustar jenkinsjenkins00000000000000.. Copyright 2011-2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Notifications ============= Notifications can be generated for several events in the image lifecycle. These can be used for auditing, troubleshooting, etc. Notification Drivers -------------------- * log This driver uses the standard Python logging infrastructure with the notifications ending up in file specificed by the log_file configuration directive. * messaging This strategy sends notifications to a message queue configured using oslo.messaging configuration options. * noop This strategy produces no notifications. It is the default strategy. Notification Types ------------------ * ``image.create`` Emitted when an image record is created in Glance. Image record creation is independent of image data upload. * ``image.prepare`` Emitted when Glance begins uploading image data to its store. * ``image.upload`` Emitted when Glance has completed the upload of image data to its store. * ``image.activate`` Emitted when an image goes to `active` status. This occurs when Glance knows where the image data is located. * ``image.send`` Emitted upon completion of an image being sent to a consumer. * ``image.update`` Emitted when an image record is updated in Glance. 
* ``image.delete`` Emitted when an image deleted from Glance. * ``task.run`` Emitted when a task is picked up by the executor to be run. * ``task.processing`` Emitted when a task is sent over to the executor to begin processing. * ``task.success`` Emitted when a task is successfully completed. * ``task.failure`` Emitted when a task fails. Content ------- Every message contains a handful of attributes. * message_id UUID identifying the message. * publisher_id The hostname of the glance instance that generated the message. * event_type Event that generated the message. * priority One of WARN, INFO or ERROR. * timestamp UTC timestamp of when event was generated. * payload Data specific to the event type. Payload ------- * image.send The payload for INFO, WARN, and ERROR events contain the following: image_id ID of the image (UUID) owner_id Tenant or User ID that owns this image (string) receiver_tenant_id Tenant ID of the account receiving the image (string) receiver_user_id User ID of the account receiving the image (string) destination_ip The receiver's IP address to which the image was sent (string) bytes_sent The number of bytes actually sent * image.create For INFO events, it is the image metadata. WARN and ERROR events contain a text message in the payload. * image.prepare For INFO events, it is the image metadata. WARN and ERROR events contain a text message in the payload. * image.upload For INFO events, it is the image metadata. WARN and ERROR events contain a text message in the payload. * image.activate For INFO events, it is the image metadata. WARN and ERROR events contain a text message in the payload. * image.update For INFO events, it is the image metadata. WARN and ERROR events contain a text message in the payload. * image.delete For INFO events, it is the image id. WARN and ERROR events contain a text message in the payload. 
* task.run The payload for INFO, WARN, and ERROR events contain the following: task_id ID of the task (UUID) owner Tenant or User ID that created this task (string) task_type Type of the task. Example, task_type is "import". (string) status, status of the task. Status can be "pending", "processing", "success" or "failure". (string) task_input Input provided by the user when attempting to create a task. (dict) result Resulting output from a successful task. (dict) message Message shown in the task if it fails. None if task succeeds. (string) expires_at UTC time at which the task would not be visible to the user. (string) created_at UTC time at which the task was created. (string) updated_at UTC time at which the task was latest updated. (string) The exceptions are:- For INFO events, it is the task dict with result and message as None. WARN and ERROR events contain a text message in the payload. * task.processing For INFO events, it is the task dict with result and message as None. WARN and ERROR events contain a text message in the payload. * task.success For INFO events, it is the task dict with message as None and result is a dict. WARN and ERROR events contain a text message in the payload. * task.failure For INFO events, it is the task dict with result as None and message is text. WARN and ERROR events contain a text message in the payload. glance-12.0.0/doc/source/glancemetadefcatalogapi.rst0000664000567000056710000004553012701407047023600 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2014 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Using Glance's Metadata Definitions Catalog Public APIs ======================================================= A common API hosted by the Glance service for vendors, admins, services, and users to meaningfully define available key / value pair and tag metadata. The intent is to enable better metadata collaboration across artifacts, services, and projects for OpenStack users. This is about the definition of the available metadata that can be used on different types of resources (images, artifacts, volumes, flavors, aggregates, etc). A definition includes the properties type, its key, it's description, and it's constraints. This catalog will not store the values for specific instance properties. For example, a definition of a virtual CPU topology property for number of cores will include the key to use, a description, and value constraints like requiring it to be an integer. So, a user, potentially through Horizon, would be able to search this catalog to list the available properties they can add to a flavor or image. They will see the virtual CPU topology property in the list and know that it must be an integer. In the Horizon example, when the user adds the property, its key and value will be stored in the service that owns that resource (Nova for flavors and in Glance for images). Diagram: https://wiki.openstack.org/w/images/b/bb/Glance-Metadata-API.png Glance Metadata Definitions Catalog implementation started with API version v2. Authentication -------------- Glance depends on Keystone and the OpenStack Identity API to handle authentication of clients. You must obtain an authentication token from Keystone send it along with all API requests to Glance through the ``X-Auth-Token`` header. Glance will communicate back to Keystone to verify the token validity and obtain your identity credentials. 
See :doc:`authentication` for more information on integrating with Keystone. Using v2.X ---------- For the purpose of examples, assume there is a Glance API server running at the URL ``http://glance.example.com`` on the default port 80. List Available Namespaces ************************* We want to see a list of available namespaces that the authenticated user has access to. This includes namespaces owned by the user, namespaces shared with the user and public namespaces. We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces`` to retrieve this list of available namespaces. The data is returned as a JSON-encoded mapping in the following format:: { "namespaces": [ { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": true, "owner": "The Test Owner", "self": "/v2/metadefs/namespaces/MyNamespace", "schema": "/v2/schemas/metadefs/namespace", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z", "resource_type_associations": [ { "name": "OS::Nova::Aggregate", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z" }, { "name": "OS::Nova::Flavor", "prefix": "aggregate_instance_extra_specs:", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z" } ] } ], "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc", "schema": "/v2/schemas/metadefs/namespaces" } .. note:: Listing namespaces will only show the summary of each namespace including counts and resource type associations. Detailed response including all its objects definitions, property definitions etc. will only be available on each individual GET namespace request. Filtering Namespaces Lists ************************** ``GET /v2/metadefs/namespaces`` requests take query parameters that serve to filter the returned list of namespaces. The following list details these query parameters. 
* ``resource_types=RESOURCE_TYPES`` Filters namespaces having a ``resource_types`` within the list of comma separated ``RESOURCE_TYPES``. GET resource also accepts additional query parameters: * ``sort_key=KEY`` Results will be ordered by the specified sort attribute ``KEY``. Accepted values include ``namespace``, ``created_at`` (default) and ``updated_at``. * ``sort_dir=DIR`` Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` for ascending or ``desc`` (default) for descending. * ``marker=NAMESPACE`` A namespace identifier marker may be specified. When present only namespaces which occur after the identifier ``NAMESPACE`` will be listed, i.e. the namespaces which have a `sort_key` later than that of the marker ``NAMESPACE`` in the `sort_dir` direction. * ``limit=LIMIT`` When present the maximum number of results returned will not exceed ``LIMIT``. .. note:: If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) then the number of results returned may be less than ``LIMIT``. * ``visibility=PUBLIC`` An admin user may use the `visibility` parameter to control which results are returned (PRIVATE or PUBLIC). Retrieve Namespace ****************** We want to see a more detailed information about a namespace that the authenticated user has access to. The detail includes the properties, objects, and resource type associations. We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces/{namespace}`` to retrieve the namespace details. 
The data is returned as a JSON-encoded mapping in the following format:: { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": true, "owner": "The Test Owner", "schema": "/v2/schemas/metadefs/namespace", "resource_type_associations": [ { "name": "OS::Glance::Image", "prefix": "hw_", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z" }, { "name": "OS::Cinder::Volume", "prefix": "hw_", "properties_target": "image", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z" }, { "name": "OS::Nova::Flavor", "prefix": "filter1:", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z" } ], "properties": { "nsprop1": { "title": "My namespace property1", "description": "More info here", "type": "boolean", "default": true }, "nsprop2": { "title": "My namespace property2", "description": "More info here", "type": "string", "default": "value1" } }, "objects": [ { "name": "object1", "description": "my-description", "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1", "schema": "/v2/schemas/metadefs/object", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z", "required": [], "properties": { "prop1": { "title": "My object1 property1", "description": "More info here", "type": "array", "items": { "type": "string" } } } }, { "name": "object2", "description": "my-description", "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2", "schema": "/v2/schemas/metadefs/object", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z", "properties": { "prop1": { "title": "My object2 property1", "description": "More info here", "type": "integer", "default": 20 } } } ] } Retrieve available Resource Types ********************************* We want to see the list of all resource types that are available in Glance We issue a ``GET`` request to 
``http://glance.example.com/v2/metadefs/resource_types`` to retrieve all resource types. The data is returned as a JSON-encoded mapping in the following format:: { "resource_types": [ { "created_at": "2014-08-28T17:13:04Z", "name": "OS::Glance::Image", "updated_at": "2014-08-28T17:13:04Z" }, { "created_at": "2014-08-28T17:13:04Z", "name": "OS::Cinder::Volume", "updated_at": "2014-08-28T17:13:04Z" }, { "created_at": "2014-08-28T17:13:04Z", "name": "OS::Nova::Flavor", "updated_at": "2014-08-28T17:13:04Z" }, { "created_at": "2014-08-28T17:13:04Z", "name": "OS::Nova::Aggregate", "updated_at": "2014-08-28T17:13:04Z" }, { "created_at": "2014-08-28T17:13:04Z", "name": "OS::Nova::Server", "updated_at": "2014-08-28T17:13:04Z" } ] } Retrieve Resource Types associated with a Namespace *************************************************** We want to see the list of resource types that are associated for a specific namespace We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces/{namespace}/resource_types`` to retrieve resource types. The data is returned as a JSON-encoded mapping in the following format:: { "resource_type_associations" : [ { "name" : "OS::Glance::Image", "prefix" : "hw_", "created_at": "2014-08-28T17:13:04Z", "updated_at": "2014-08-28T17:13:04Z" }, { "name" :"OS::Cinder::Volume", "prefix" : "hw_", "properties_target" : "image", "created_at": "2014-08-28T17:13:04Z", "updated_at": "2014-08-28T17:13:04Z" }, { "name" : "OS::Nova::Flavor", "prefix" : "hw:", "created_at": "2014-08-28T17:13:04Z", "updated_at": "2014-08-28T17:13:04Z" } ] } Add Namespace ************* We want to create a new namespace that can contain the properties, objects, etc. 
We issue a ``POST`` request to add an namespace to Glance:: POST http://glance.example.com/v2/metadefs/namespaces/ The input data is an JSON-encoded mapping in the following format:: { "namespace": "MyNamespace", "display_name": "My User Friendly Namespace", "description": "My description", "visibility": "public", "protected": true } .. note:: Optionally properties, objects and resource type associations could be added in the same input. See GET Namespace output above(input will be similar). Update Namespace **************** We want to update an existing namespace We issue a ``PUT`` request to update an namespace to Glance:: PUT http://glance.example.com/v2/metadefs/namespaces/{namespace} The input data is similar to Add Namespace Delete Namespace **************** We want to delete an existing namespace including all its objects, properties etc. We issue a ``DELETE`` request to delete an namespace to Glance:: DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace} Associate Resource Type with Namespace ************************************** We want to associate a resource type with an existing namespace We issue a ``POST`` request to associate resource type to Glance:: POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/resource_types The input data is an JSON-encoded mapping in the following format:: { "name" :"OS::Cinder::Volume", "prefix" : "hw_", "properties_target" : "image", "created_at": "2014-08-28T17:13:04Z", "updated_at": "2014-08-28T17:13:04Z" } Remove Resource Type associated with a Namespace ************************************************ We want to de-associate namespace from a resource type We issue a ``DELETE`` request to de-associate namespace resource type to Glance:: DELETE http://glance.example.com/v2//metadefs/namespaces/{namespace}/resource_types/{resource_type} List Objects in Namespace ************************* We want to see the list of meta definition objects in a specific namespace We issue a ``GET`` request to 
``http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects`` to retrieve objects. The data is returned as a JSON-encoded mapping in the following format:: { "objects": [ { "name": "object1", "description": "my-description", "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1", "schema": "/v2/schemas/metadefs/object", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z", "required": [], "properties": { "prop1": { "title": "My object1 property1", "description": "More info here", "type": "array", "items": { "type": "string" } } } }, { "name": "object2", "description": "my-description", "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2", "schema": "/v2/schemas/metadefs/object", "created_at": "2014-08-28T17:13:06Z", "updated_at": "2014-08-28T17:13:06Z", "properties": { "prop1": { "title": "My object2 property1", "description": "More info here", "type": "integer", "default": 20 } } } ], "schema": "/v2/schemas/metadefs/objects" } Add object in a specific namespace ********************************** We want to create a new object which can group the properties We issue a ``POST`` request to add object to a namespace in Glance:: POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects The input data is an JSON-encoded mapping in the following format:: { "name": "StorageQOS", "description": "Our available storage QOS.", "required": [ "minIOPS" ], "properties": { "minIOPS": { "type": "integer", "description": "The minimum IOPs required", "default": 100, "minimum": 100, "maximum": 30000369 }, "burstIOPS": { "type": "integer", "description": "The expected burst IOPs", "default": 1000, "minimum": 100, "maximum": 30000377 } } } Update Object in a specific namespace ************************************* We want to update an existing object We issue a ``PUT`` request to update an object to Glance:: PUT http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects/{object_name} The input data is similar to 
Add Object Delete Object in a specific namespace ************************************* We want to delete an existing object. We issue a ``DELETE`` request to delete object in a namespace to Glance:: DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects/{object_name} Add property definition in a specific namespace *********************************************** We want to create a new property definition in a namespace We issue a ``POST`` request to add property definition to a namespace in Glance:: POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties The input data is an JSON-encoded mapping in the following format:: { "name": "hypervisor_type", "title" : "Hypervisor", "type": "array", "description": "The type of hypervisor required", "items": { "type": "string", "enum": [ "hyperv", "qemu", "kvm" ] } } Update property definition in a specific namespace ************************************************** We want to update an existing object We issue a ``PUT`` request to update an property definition in a namespace to Glance:: PUT http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties/{property_name} The input data is similar to Add property definition Delete property definition in a specific namespace ************************************************** We want to delete an existing object. We issue a ``DELETE`` request to delete property definition in a namespace to Glance:: DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties/{property_name} API Message Localization ------------------------ Glance supports HTTP message localization. For example, an HTTP client can receive API messages in Chinese even if the locale language of the server is English. How to use it ************* To receive localized API messages, the HTTP client needs to specify the **Accept-Language** header to indicate the language to use to translate the message. 
For more info about Accept-Language, please refer http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html A typical curl API request will be like below:: curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' http://127.0.0.1:9292/v2/metadefs/namespaces/{namespace} Then the response will be like the following:: HTTP/1.1 404 Not Found Content-Length: 234 Content-Type: text/html; charset=UTF-8 X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a Date: Sat, 22 Feb 2014 06:26:26 GMT 404 Not Found

404 Not Found

找不到任何具有标识 aaa 的映像

.. note:: Be sure there is the language package under /usr/share/locale-langpack/ on the target Glance server. glance-12.0.0/doc/source/db.rst0000664000567000056710000000327212701407047017356 0ustar jenkinsjenkins00000000000000.. Copyright 2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Database Management =================== The default metadata driver for Glance uses sqlalchemy, which implies there exists a backend database which must be managed. The ``glance-manage`` binary provides a set of commands for making this easier. The commands should be executed as a subcommand of 'db': glance-manage db Sync the Database ----------------- glance-manage db sync Place a database under migration control and upgrade, creating it first if necessary. Determining the Database Version -------------------------------- glance-manage db version This will print the current migration level of a Glance database. Upgrading an Existing Database ------------------------------ glance-manage db upgrade This will take an existing database and upgrade it to the specified VERSION. Downgrading an Existing Database -------------------------------- glance-manage db downgrade This will downgrade an existing database from the current version to the specified VERSION. glance-12.0.0/doc/source/conf.py0000664000567000056710000002201112701407047017526 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2010 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Glance documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()'d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import subprocess import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path = [ os.path.abspath('../..'), os.path.abspath('../../bin') ] + sys.path # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'oslosphinx', 'stevedore.sphinxext', 'oslo_config.sphinxext', 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. 
master_doc = 'index' # General information about the project. project = u'Glance' copyright = u'2010-2014, OpenStack Foundation.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from glance.version import version_info as glance_version # The full version, including alpha/beta/rc tags. release = glance_version.version_string_with_vcs() # The short X.Y version. version = glance_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. #exclude_trees = ['api'] exclude_patterns = [ # The man directory includes some snippet files that are included # in other documents during the build but that should not be # included in the toctree themselves, so tell Sphinx to ignore # them when scanning for input files. 'man/footer.rst', 'man/general_options.rst', 'man/openstack_options.rst', ] # The reST default role (for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['glance.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/glanceapi', 'glance-api', u'Glance API Server', [u'OpenStack'], 1), ('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner', [u'OpenStack'], 1), ('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager', [u'OpenStack'], 1), ('man/glancecacheprefetcher', 'glance-cache-prefetcher', u'Glance Cache Pre-fetcher', [u'OpenStack'], 1), ('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner', [u'OpenStack'], 1), ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ', [u'OpenStack'], 1), ('man/glancemanage', 'glance-manage', u'Glance Management Utility', [u'OpenStack'], 1), ('man/glanceregistry', 'glance-registry', u'Glance Registry Server', [u'OpenStack'], 1), ('man/glancereplicator', 'glance-replicator', u'Glance Replicator', [u'OpenStack'], 1), ('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service', [u'OpenStack'], 1) ] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen(git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = 'glancedoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, # documentclass [howto/manual]). latex_documents = [ ('index', 'Glance.tex', u'Glance Documentation', u'Glance Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True glance-12.0.0/doc/source/_static/0000775000567000056710000000000012701407204017654 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/_static/.placeholder0000664000567000056710000000000012701407047022132 0ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/database_architecture.rst0000664000567000056710000002027712701407047023303 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
============================ Glance database architecture ============================ Glance Database Public API ~~~~~~~~~~~~~~~~~~~~~~~~~~ The Glance Database API contains several methods for moving image metadata to and from persistent storage. You can find a list of public methods grouped by category below. Common parameters for image methods ----------------------------------- The following parameters can be applied to all of the image methods below: - ``context`` — corresponds to a glance.context.RequestContext object, which stores the information on how a user accesses the system, as well as additional request information. - ``image_id`` — a string corresponding to the image identifier. - ``memb_id`` — a string corresponding to the member identifier of the image. Image basic methods ------------------- **Image processing methods:** #. ``image_create(context, values)`` — creates a new image record with parameters listed in the *values* dictionary. Returns a dictionary representation of a newly created *glance.db.sqlalchemy.models.Image* object. #. ``image_update(context, image_id, values, purge_props=False, from_state=None)`` — updates the existing image with the identifier *image_id* with the values listed in the *values* dictionary. Returns a dictionary representation of a newly created *Image* object. Optional parameters are: - ``purge_props`` — a flag indicating that all the existing properties not listed in the *values['properties']* should be deleted; - ``from_state`` — a string filter indicating that the updated image must be in the specified state. #. ``image_destroy(context, image_id)`` — deletes all the database record of an image with the identifier *image_id* (like tags, properties, and members) and sets a 'deleted' status on all the image locations. #. ``image_get(context, image_id, force_show_deleted=False)`` — gets an image with the identifier *image_id* and returns its dictionary representation. 
The parameter *force_show_deleted* is a flag that indicates to show image info even if it was 'deleted', or its 'pending_delete' statuses. #. ``image_get_all(context, filters=None, marker=None, limit=None, sort_key=None, sort_dir=None, member_status='accepted', is_public=None, admin_as_user=False, return_tag=False)`` — gets all the images that match zero or more filters. Optional parameters are: - ``filters`` — dictionary of filter keys and values. If a 'properties' key is present, it is treated as a dictionary of key/value filters in the attribute of the image properties. - ``marker`` — image id after which a page should start. - ``limit`` — maximum number of images to return. - ``sort_key`` — list of image attributes by which results should be sorted. - ``sort_dir`` — direction in which results should be sorted (asc, desc). - ``member_status`` — only returns shared images that have this membership status. - ``is_public`` — if true, returns only public images. If false, returns only private and shared images. - ``admin_as_user`` — for backwards compatibility. If true, an admin sees the same set of images that would be seen by a regular user. - ``return_tag`` — indicates whether an image entry in the result includes its relevant tag entries. This can improve upper-layer query performance and avoid using separate calls. Image location methods ---------------------- **Image location processing methods:** #. ``image_location_add(context, image_id, location)`` — adds a new location to an image with the identifier *image_id*. This location contains values listed in the dictionary *location*. #. ``image_location_update(context, image_id, location)`` — updates an existing location with the identifier *location['id']* for an image with the identifier *image_id* with values listed in the dictionary *location*. #. 
``image_location_delete(context, image_id, location_id, status, delete_time=None)`` — sets a 'deleted' or 'pending_delete' *status* to an existing location record with the identifier *location_id* for an image with the identifier *image_id*. Image property methods ---------------------- .. warning:: There is no public property update method. So if you want to modify it, you have to delete it first and then create a new one. **Image property processing methods:** #. ``image_property_create(context, values)`` — creates a property record with parameters listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of a newly created *ImageProperty* object. #. ``image_property_delete(context, prop_ref, image_ref)`` — deletes an existing property record with a name *prop_ref* for an image with the identifier *image_ref*. Image member methods -------------------- **Methods to handle image memberships:** #. ``image_member_create(context, values)`` — creates a member record with properties listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of a newly created *ImageMember* object. #. ``image_member_update(context, memb_id, values)`` — updates an existing member record with properties listed in the *values* dictionary for an image with *values['id']*. Returns a dictionary representation of an updated member record. #. ``image_member_delete(context, memb_id)`` — deletes an existing member record with *memb_id*. #. ``image_member_find(context, image_id=None, member=None, status=None)`` — returns all members for a given context with optional image identifier (*image_id*), member name (*member*), and member status (*status*) parameters. #. ``image_member_count(context, image_id)`` — returns a number of image members for an image with *image_id*. Image tag methods ----------------- **Methods to process images tags:** #. 
``image_tag_set_all(context, image_id, tags)`` — changes all the existing tags for an image with *image_id* to the tags listed in the *tags* param. To remove all tags, a user just should provide an empty list. #. ``image_tag_create(context, image_id, value)`` — adds a *value* to tags for an image with *image_id*. Returns the value of a newly created tag. #. ``image_tag_delete(context, image_id, value)`` — removes a *value* from tags for an image with *image_id*. #. ``image_tag_get_all(context, image_id)`` — returns a list of tags for a specific image. Image info methods ------------------ The next two methods inform a user about his or her ability to modify and view an image. The *image* parameter here is a dictionary representation of an *Image* object. #. ``is_image_mutable(context, image)`` — informs a user about the possibility to modify an image with the given context. Returns True if the image is mutable in this context. #. ``is_image_visible(context, image, status=None)`` — informs about the possibility to see the image details with the given context and optionally with a status. Returns True if the image is visible in this context. **Glance database schema** .. figure:: /images/glance_db.png :figwidth: 100% :align: center :alt: Glance images DB schema .. centered:: Image 1. Glance images DB schema Glance Database Backends ~~~~~~~~~~~~~~~~~~~~~~~~ Migration Backends ------------------ .. list-plugins:: glance.database.migration_backend :detailed: Metadata Backends ----------------- .. list-plugins:: glance.database.metadata_backend :detailed: glance-12.0.0/doc/source/opts/0000775000567000056710000000000012701407204017213 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/opts/index.rst0000664000567000056710000000063512701407051021060 0ustar jenkinsjenkins00000000000000============================= Glance Configuration Options ============================= This section provides a list of all possible options for each configuration file. 
Refer to :doc:`Basic Configuration ` for a detailed guide in getting started with various option settings. Glance uses the following configuration files for its various services. .. toctree:: :glob: :maxdepth: 1 * glance-12.0.0/doc/source/opts/glance_cache.rst0000664000567000056710000000023612701407047022327 0ustar jenkinsjenkins00000000000000.. _glance-cache.conf: ----------------- glance-cache.conf ----------------- .. show-options:: :config-file: etc/oslo-config-generator/glance-cache.conf glance-12.0.0/doc/source/opts/glance_scrubber.rst0000664000567000056710000000025512701407047023074 0ustar jenkinsjenkins00000000000000.. _glance-scrubber.conf: -------------------- glance-scrubber.conf -------------------- .. show-options:: :config-file: etc/oslo-config-generator/glance-scrubber.conf glance-12.0.0/doc/source/opts/glance_api.rst0000664000567000056710000000022412701407047022032 0ustar jenkinsjenkins00000000000000.. _glance-api.conf: --------------- glance-api.conf --------------- .. show-options:: :config-file: etc/oslo-config-generator/glance-api.conf glance-12.0.0/doc/source/opts/glance_registry.rst0000664000567000056710000000047212701407047023136 0ustar jenkinsjenkins00000000000000.. _glance-registry.conf: -------------------- glance-registry.conf -------------------- This configuration file controls how the register server operates. More information can be found in :ref:`configuring-the-glance-registry`. .. show-options:: :config-file: etc/oslo-config-generator/glance-registry.conf glance-12.0.0/doc/source/opts/glance_manage.rst0000664000567000056710000000024312701407047022512 0ustar jenkinsjenkins00000000000000.. _glance-manage.conf: ------------------ glance-manage.conf ------------------ .. show-options:: :config-file: etc/oslo-config-generator/glance-manage.conf glance-12.0.0/doc/source/metadefs-concepts.rst0000664000567000056710000002024312701407047022372 0ustar jenkinsjenkins00000000000000.. Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Metadata Definition Concepts ============================ The metadata definition service was added to Glance in the Juno release of OpenStack. It provides a common API for vendors, admins, services, and users to meaningfully **define** available key / value pair metadata that can be used on different types of resources (images, artifacts, volumes, flavors, aggregates, etc). A definition includes a property's key, its description, its constraints, and the resource types to which it can be associated. This catalog does not store the values for specific instance properties. For example, a definition of a virtual CPU topology property for the number of cores will include the base key to use (for example, cpu_cores), a description, and value constraints like requiring it to be an integer. So, a user, potentially through Horizon, would be able to search this catalog to list the available properties they can add to a flavor or image. They will see the virtual CPU topology property in the list and know that it must be an integer. When the user adds the property its key and value will be stored in the service that owns that resource (for example, Nova for flavors and in Glance for images). The catalog also includes any additional prefix required when the property is applied to different types of resources, such as "hw\_" for images and "hw:" for flavors. So, on an image, the user would know to set the property as "hw_cpu_cores=1". 
Terminology ----------- Background ~~~~~~~~~~ The term *metadata* can become very overloaded and confusing. This catalog is about the additional metadata that is passed as arbitrary key / value pairs or tags across various artifacts and OpenStack services. Below are a few examples of the various terms used for metadata across OpenStack services today: +-------------------------+---------------------------+----------------------+ | Nova | Cinder | Glance | +=========================+===========================+======================+ | Flavor | Volume & Snapshot | Image & Snapshot | | + *extra specs* | + *image metadata* | + *properties* | | Host Aggregate | + *metadata* | + *tags* | | + *metadata* | VolumeType | | | Servers | + *extra specs* | | | + *metadata* | + *qos specs* | | | + *scheduler_hints* | | | | + *tags* | | | +-------------------------+---------------------------+----------------------+ Catalog Concepts ~~~~~~~~~~~~~~~~ The below figure illustrates the concept terminology used in the metadata definitions catalog:: A namespace is associated with 0 to many resource types, making it visible to the API / UI for applying to that type of resource. RBAC Permissions are managed at a namespace level. +----------------------------------------------+ | Namespace | | | | +-----------------------------------------+ | | | Object Definition | | | | | | +--------------------+ | | +-------------------------------------+ | | +--> | Resource Type: | | | | Property Definition A (key=integer) | | | | | e.g. Nova Flavor | | | +-------------------------------------+ | | | +--------------------+ | | | | | | | +-------------------------------------+ | | | | | | Property Definition B (key=string) | | | | +--------------------+ | | +-------------------------------------+ | +--+--> | Resource Type: | | | | | | | e.g. 
Glance Image | | +-----------------------------------------+ | | +--------------------+ | | | | +-------------------------------------+ | | | | Property Definition C (key=boolean) | | | +--------------------+ | +-------------------------------------+ | +--> | Resource Type: | | | | e.g. Cinder Volume | +----------------------------------------------+ +--------------------+ Properties may be defined standalone or within the context of an object. Catalog Terminology ~~~~~~~~~~~~~~~~~~~ The following terminology is used within the metadata definition catalog. **Namespaces** Metadata definitions are contained in namespaces. - Specify the access controls (CRUD) for everything defined in it. Allows for admin only, different projects, or the entire cloud to define and use the definitions in the namespace - Associates the contained definitions to different types of resources **Properties** A property describes a single property and its primitive constraints. Each property can ONLY be a primitive type: * string, integer, number, boolean, array Each primitive type is described using simple JSON schema notation. This means NO nested objects and no definition referencing. **Objects** An object describes a group of one to many properties and their primitive constraints. Each property in the group can ONLY be a primitive type: * string, integer, number, boolean, array Each primitive type is described using simple JSON schema notation. This means NO nested objects. The object may optionally define required properties under the semantic understanding that a user who uses the object should provide all required properties. **Resource Type Association** Resource type association specifies the relationship between resource types and the namespaces that are applicable to them. This information can be used to drive UI and CLI views. For example, the same namespace of objects, properties, and tags may be used for images, snapshots, volumes, and flavors. 
Or a namespace may only apply to images. Resource types should be aligned with Heat resource types whenever possible. http://docs.openstack.org/developer/heat/template_guide/openstack.html It is important to note that the same base property key can require different prefixes depending on the target resource type. The API provides a way to retrieve the correct property based on the target resource type. Below are a few examples: The desired virtual CPU topology can be set on both images and flavors via metadata. The keys have different prefixes on images than on flavors. On flavors keys are prefixed with ``hw:``, but on images the keys are prefixed with ``hw_``. For more: https://github.com/openstack/nova-specs/blob/master/specs/juno/implemented/virt-driver-vcpu-topology.rst Another example is the AggregateInstanceExtraSpecsFilter and scoped properties (e.g. properties with something:something=value). For scoped / namespaced properties, the AggregateInstanceExtraSpecsFilter requires a prefix of "aggregate_instance_extra_specs:" to be used on flavors but not on the aggregate itself. Otherwise, the filter will not evaluate the property during scheduling. So, on a host aggregate, you may see: companyx:fastio=true But then when used on the flavor, the AggregateInstanceExtraSpecsFilter needs: aggregate_instance_extra_specs:companyx:fastio=true In some cases, there may be multiple different filters that may use the same property with different prefixes. In this case, the correct prefix needs to be set based on which filter is enabled. glance-12.0.0/doc/source/installing.rst0000664000567000056710000001113212701407047021127 0ustar jenkinsjenkins00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Installation ============ Installing from packages ~~~~~~~~~~~~~~~~~~~~~~~~ To install the latest released version of Glance, follow the following instructions. Debian, Ubuntu ############## 1. Add the Glance PPA to your sources.lst:: $> sudo add-apt-repository ppa:glance-core/trunk $> sudo apt-get update 2. Install Glance:: $> sudo apt-get install glance Red Hat, Fedora ############### Only RHEL 6, Fedora 18, and newer releases have the necessary components packaged. On RHEL 6, enable the EPEL repository. Install Glance:: $ su - # yum install openstack-glance openSUSE, SLE ############# openSUSE 13.2, SLE 12, and the rolling release Factory needs an extra repository enabled to install all the OpenStack packages. Search the proper repository in the `Cloud:OpenStack:Master `_ project. For example, for openSUSE 13.2: 1. Add the OpenStack master repository:: $ sudo zypper ar -f -g http://download.opensuse.org/repositories/Cloud:/OpenStack:/Master/openSUSE_13.2/ OpenStack $ sudo zypper ref 2. Install Glance:: $ sudo zypper in openstack-glance Installing from source tarballs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To install the latest version of Glance from the Launchpad Bazaar repositories, follow the following instructions. 1. Grab the source tarball from `Launchpad `_ 2. Untar the source tarball:: $> tar -xzf 3. Change into the package directory and build/install:: $> cd glance- $> sudo python setup.py install Installing from Git ~~~~~~~~~~~~~~~~~~~ To install the latest version of Glance from the GitHub Git repositories, follow the following instructions. Debian, Ubuntu ############## 1. 
Install Git and build dependencies:: $> sudo apt-get install git $> sudo apt-get build-dep glance .. note:: If you want to build the Glance documentation locally, you will also want to install the python-sphinx package 2. Clone Glance's trunk branch from GitHub:: $> git clone git://github.com/openstack/glance $> cd glance 3. Install Glance:: $> sudo python setup.py install Red Hat, Fedora ############### On Fedora, most developers and essentially all users install packages. Instructions below are not commonly used, and even then typically in a throw-away VM. Since normal build dependencies are resolved by mechanisms of RPM, there is no one-line command to install everything needed by the source repository in git. One common way to discover the dependencies is to search for *BuildRequires:* in the specfile of openstack-glance for the appropriate distro. In case of Fedora 16, for example, do this:: $ su - # yum install git # yum install python2-devel python-setuptools python-distutils-extra # yum install python-webob python-eventlet # yum install python-virtualenv Build Glance:: $ python setup.py build If any missing modules crop up, install them with yum, then retry the build. .. note:: If you want to build the Glance documentation, you will also want to install the packages python-sphinx and graphviz, then run "python setup.py build_sphinx". Due to required features of python-sphinx 1.0 or better, documentation can only be built on Fedora 15 or later. Test the build:: $ ./run_tests.sh -s Once Glance is built and tested, install it:: $ su - # python setup.py install openSUSE, SLE ############# On openSUSE and SLE (also this is valid for Factory), we can install all the build dependencies using Zypper. 1. Install Git and build dependencies:: $ sudo zypper install git $ sudo zypper source-install -d openstack-glance .. note:: If you want to build the Glance documentation locally, you will also want to install the packages python-sphinx and graphviz. 2. 
Clone Glance's trunk branch from GitHub:: $ git clone git://github.com/openstack/glance $ cd glance 3. Install Glance:: $ sudo python setup.py install glance-12.0.0/doc/source/images/0000775000567000056710000000000012701407204017473 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/images/glance_db.png0000664000567000056710000032274612701407047022122 0ustar jenkinsjenkins00000000000000PNG  IHDR0*5~IDATxXTY}Cw#UTr$̱9 (rΒs9* ;̝{op8T%"z|6N:op 7pmna nnnHnᆁnnna nᆁnnHna nnnHn6@?J_pQ]Gn!_?pm_̆c7p{_ ׮][#$6涜oSn/m۶9&%?_.&"$ɓ'` &$pm@~I9|6>!|БOz;bBp흁?fE 77Vl3#s孭$ȓ$ 7p?W갽O\ y??H;njkk_|Wp wҿo $lgϞqn RVVViiiKKKn3MN Ef5t~yo̒OHۗfξew\ s}T 555}@b7n}AE3ܵ5_ #BJn_DC i!>L_4 iL}HۿCJCNgI*;z<]FYid.^etu藛o;<}]LFUJAVDg6nA=#!vƉ2e6)xg4F2|^8 }Sײr V4c1&O/]zV n˖8z 2AI_TnkE@[~KVaN:x tPdr: \d):W:|ł6wO<7V x \p|:ܶ 9 dx_J͖8^:?Aϰ왳x{pM C#Qm[d/^*lHWWׯK0!&}]@1:ɇԇ8#ݑYM`*;XD( )u@ 'o4Wi>y˳X{ɎvetJER/۵k׶I`-l_Tý/^H 4 YGI 5+?Q_y  \䚕mR _W9S8f1Eڍ[X6i+S?ɏܰikmW $ !!ak kiiy˗/!O?4 $Hdҳx m8H^MZi<4ᒱ8eT䠴 ?gB vрtA @ JM*͛7$qWf Pk8pO,8A!ip|!\.o3-\h :x^JN6У|k*F2G %U3>pZ@^W\_\\\[[ŋ*|u%-brl,&*.avD;إ|+雷RF#4(Y\@@p->- .j OdKxF{`n:̜5Nl95gMۤ( /4 W܊ H!޹W A2ڣpGU12ƋCEQH.NW' $lؾ* eggBUk<}9H =Zο10&;A}^ڕ3,MS/sB#xhf1@@Xc)]`@  SibY& 6 $ I dr'HH00& > $;@@†4Y4,Y  %2iN0n0}T $;@R<},b{fO۟FX7i5hu{;z kcdւk&dM#^X=iV7lxUf5`V=h/ k*k}Y9^UrҚI{^F&JGZ FgȊ@X0M@=Y& 0Aޱ(4x/u~+6 5pW#j8Ѩ=i2 9ШF%\iP ˤ;@Hh~4]h;ƒp8hsCшH/8  ` }2 N`nTMTѨ#:FQ{шSy;h: $ B :^Oơ0t4hHذa }x c>јuoh:HO3thToR0>8F-Ѩ4jlFHH0>ܡΗ@iB޺DM:*}AuU% { ߡ} ዋzzz޼y͚>tddjHDoz4=p*nF{e@ K24= )x}*B MH(v $ iE?F9'b; ߡa }H }'s $QIM'{#e:QaU ͚E(iE5/ 7:w緈kSupEwSF7m}#QFq:@@†сu`oCG{͏&g"u;p\AYջ$3K(9jmKDlLFLRV[τ @:q:Kh҈#YLZ #}sý2 ;o[476I00=cpDRWpiyU7zQӃ'.Hʩ;s3TY]WRVY@bN-I#]#3Z`ŝږ~W9@q_en_TWnߥcy3f hhOd'zFO!(#lyE$qwhFai6q{wБVu wZt9biX{9B%p$8.(֍~ԩU4ss^!3 q t?|$+zڸFe͛GY9c ! 
qTb]JVq#&}oDNE P#>(ip ޷(J+l1ν(ZsF.8\ju(tGHq}pzU%ly9[R$@zwO.mpŇ <ƮWvH ťƤ䳻H"O&އ|h,^(ҁNMe մ ۸ %Flhkוͬ9k6H6X T}e =T+*XHF:x𓵳?'4Kv$00M(>џյ(մ oY{jug.@$Hf2"YwA ˍf̘:6(f]n￟[ػQ#7mG4~RMS[MCw>#ʪVdl0bt 6{ M#g/\>ǘSTuE x>n`Gyid`ht{k8ȈNcvX]Kk~4% Eރ!Q);2^<{YNqK7Q{ӣ4 _L L1x6$hw1A#M&5-O^}%J{۞Ш)؄mLN rqw =*`Q ׯ_ Ȓ1I=XҌ{ ?җ.]NݦxO`xPxWD1Y-((S d\r IXd?ӟ1w| O['0zhPg ̫9{!T퐠Sc?ƑhZ8X9(uŽb5MkF}Bh~K!&UB~%"['Ӧk7L`;; 9}* @\sߵ;,\à w 4&HDА~ʡG#| mLhCG|ȩkD6|u <6mI'4CR- J/H_֮]H,sҲxx<eEҲL G9,AKN^I3g> ?:VMB@(3bʔo;! 'PT&@>p䤶JCGOѶk7 9{Uj;! :pk!1B!L``h#H@tǟ]p4&HࣥvhWRG19uѻG 9,*G鑫_4=&'-. 45{.r hÏ[ ACSokk;*^۟" +(F.]74S5;L xRyEUEe9sE" EĦx "%*ᄰTVF{Px;+d9p5e59s}H1)dBS$  ჻zQZٺ]/^}^182* w *ӱ #+{wiA3/W\@G$Rr{Ni4>M MəRpMjYΡś$#5UM-k"ݮk1y%K7,8x!?p힂v`x2y&ĥWv$p⨔BY;z}J:w ;5U M4oBHwAeug.QPx᭠zI#GP =I9}IuF}98](({79s1b6ϛ(DlW6t uк]z;5Yr#EKα͖йՁye|L'=BC;$=a^C@ZF5w<'Pݴtq;{>uir~!N]M[ŬCGe-,\R@ʩxJSJz͊ʮ@v9vYft.@{w&ej ȸd.&KGt@RgoЧk1ĜD4gGzmHpRЀ4 S[m+֓e:!uAv#S30;5}bh617 !QAstP_lH$~cRHv#')# :rIiLqi2.Aw022ʤ 6thP1b`IH+?"~7U ߺvFE]{UGHaS^ &.>t ?{T3+Z#(o~_ljR/# L[M=L 6K+t79ni.乛736;ң&pB\|-YB@2{TEC؁ &)(]AFNnbR;lipk7SCONA#1q)yEU 0HJxd~޲EtȺ @<\M`,@B ~ۓ@;:HLFȴk?nڲJPh*ME _8+ZzF{ʄW6X ?$$c$~$5V AGZN ~qnWٺ]|Hߐ@BIͩaHLqh4w@nB(/DG;pye?}ChtqI9մMH@ϛhKSF::e #pç L̑O?|z%J*y8B>:}]Ye=$tnD!n%I9twp sg#Z`,9>$ Љs7HQm5MfH$ȶi&ɑM__;v:Y[[PZRRRPV۷\oT@MuQusOc3@B" ͐-+U6>k'囧LF4*=QuBYZZ]}QX\%0އFHK"gol@Hն?Cn?h~ -!m.'DЇژ|HOFs""QY!)#u8#: %<z8ݸg[əH AǁF&I)^/.%Oqm+g>s6Ҋm&!)G $-Zl?,*(nHM#6(ݽMBBFiV1@rݴEwμfC)SԴjz 󊕂|6%) 9U=$l ͜9^deeYɲ kx 4 } A)oۼuC'/ҽJ{ :.T,}$ kE"(9[W=*iͩ|]N`#>,ewfT>eJ)h& =:|ٱs Rvvٳ+5|4o'g1+Z_w<4j4*okAFڶ,4EMF H ]v+6n\a8oL>DC@ IeNuhZ}2˺4ǐ*Fg`D/72?|''oi.HsW%e)(Hg9HRHdʇ͍fGS;Fhhh,^FwcI%fZn[::pr~Bȶ2mLu%XWFFPP:b\#2T,XPbѰڶNMz^ py 00z s9_7=wO?~J TH Q+u,@hIU.1^G'McI8F&f81B[PQk'iDv>}=UQY[$߶TQإNcF^Ezn \<l= BD3Ew3?RR݊tشG[;5Xu Փ숢Cg~њ:J;.laFrݿ ?dr̃,\]΅F T}ըz;+rn<فG@C%MT W۸Hp8v@XJ#M0qwʮ↧cO>XT4"7BJ`s~=}. 
v#6|M/(bjA;gC5 meU8w Z\J6G}=M_t9'w_MGW@X`h r!MGBA!iYߠHsoȅs|`"4PF PoUÛ.eyxF% iqbF1 z̃ߩs%.^`s'BZV K9뵅WW`ɳ H+W9yK!&@{]*(;WB%$IOO4tĞM6tT.CG3gv Pź;iY5EN[1H#4Rqx0R?!1:rTu!&3n=P$޳< EbjvIrf{n@hL*fVa$كN}{}[UQ H^tpˆGY9"w਑֞7Oj$HӦų6`Fn8C:p<=-"݉&VQov.#.:8WbH(tOxYVƿruHt $K;WX{ 'oxx*[Эڝ'}4` Gݘ|4A 䣤Gp5Gϰq }?kG(FH,F)s# 0k;{%KFvSt>GF/Xll^e5F@ɳ{B E)((b(%"T}7o>Û2C'=CS#' *̫ykxK_QhC~@j~:1 cϳD:qy2]K˸$rᑎ pH 8EKeTgu E7 h"K$eD},^/F $1mLnuʅ'Oջv;{jIɩLɫ+ɛޢCR鋷eThxD#F|~H\. .x)SDdԴ< EߖVPWم; O.12,hiWs׭[wH2r G4@9Hhwg/h7Q$_zĒiCaE$UO^.\8.5<~_HA .f_o4$~ dڒ(V /,]·#C[}Хh3==,!c` IY @BwRUK?kfRkLi|~HFY $oB 7gO &tw:jCGy{$,Zt ?no-+; #.-[$Д)S8nFʚN>s@b[Y]HMqŇ،*G=;oxWH 0IYC4:?nOX:<kzTz% zFIHi`Ho 1r"1ubRQm!M Ia: ^YDThBH(~BH|jAռC[VdhnkR̢:Y?݇:{r&q͇AfΞtЄ&@dVrpNaUN@h" 7 |IC+Lw)bG(!실S.U -Gui}?KVvTvXLə H$CҳЄoxxkzPbt5I,jC%oɮAe_YAѣe|+ 5{NиpnFH,39DY0\DmS?pNj4+7A*#vB[p$GK.qӖ[@R;HБShv ON@VC(ZۻC7v p"xc'A2k:xC t M#hӦKʭZ+%H8 HA.;$唷lR'$]"~X'AT f!D[f|_$&Hew/zufӗԵi}csCQ#ih";JHڃ@v_6gdp2uT Y:@U-E7*{_FeO1g7tA6&OʮoE[FEu_#{Դ u l݃.^:yT =tJԹ }"ڲ]J`Pfœa 0F q ['Sg~TMk?~!qXni 4OQç:b񉟷)\-Hn,|H'b]W rGr{I1kMOn.mH' ?*00jC :wMK'?}݋K+,\ٻԩӶˬZ#"& OV .!7.xR#'50H ݰt5{δ3nZCS\-n܌ߝ#t% _x;00<M4ޡwJez"TX41㪣2<*H.h"RHi9ؽ?{ްШ5oGNǯ{U7?4l%'2T4FUEhB6&^"'R"Tc %7=7(!%7>}F`X*Zi48 }1KLǢ);ثs@'R7HQ 4lRU4tsɍ!# (~S׋}Gc놴W1w.&U*>E4+uHM\D>7 a}} M_QG.X4@bQXt.m}eU #}ŕpB{HyǚbpX{S/.`3%UuG =+'w:~7Xzn9VQZ9f$ M!lV7J݆~rtCbA#{A`Ԝr.42{f]kG2 7tD}+;#!D#5UM"'s# ٺu,mXV]vOAECU];0<h4[D .1n>D^u,=*ȭiDQva՜9sC!+r %_q>D#=ؔlux cx-[·4RD[?$- 8ddG("F8!&\nA4*o rRrdPйz˒Kn4]ΝEB\xB^|4 Yڹ@irS'\9d2> [7*wPAލyi4 }vԡ#kGEc/N4o瑒_N鑎~v4ٵit-.Aha3tDYY96GV 7Zacs3獢ﮕ:Sϛ(bSr;gO"6@"Mə6mҲ $XA 2x"xz)tBbdD;{2K|mv.]#.:Bh;f4 ^7 dD@M0?*!uߵ< ]iT:@CGfG}2t/N:=Z NR^Ӹ0XDŏ=pc.&q}4 '0lu(=U{1Y3w`Iiy2BLGURHC"YBXMS܊/dD:yoA+wq7"0ڟHYh<;7e {HܳyṘFJ 缻ąk!>F{';SSy;^"v({(][Sz宼#yLNy;\4mhzȊZ^\m3eʔ:}BO*Mcѣ{H˫: h+)gr !ш"#(Dw!'2M! 
iwm.)j]cv윸SWH?wKJ~.X*0g1衱?/!1" y[F!=ﱯ7[y@H49T E.Z R -}RǨ@3Zݤ72u-=1IƮO܉;5tȠ50$,fN@"v,T yDb @о!ȍǍޓFyHYMKBZh78ڈDf:y{D?sy9Sh|6~ig9uɁS7IEC}b^$x  K燮N^\ Gf p-(.ZNYCJN3=H "N\|BɨI҅Z(a3uvt>UW:d5$dUmiwlfϙG J@Ύm&ΞkH hȈΩk&i]XaļW­\[!mjFR{op5G7_fƣixG YA!w2xt iunxF g%U3S.2ȸlœ$:;Y!=:EՄ&h@~+ISK MnHLAn{7FIYFJ 1y|F *V/ϟtvLm|Eq0i7Ԓ]tj7<'86[GeJw h"R(Ca}VygVY'j=%;F|<1%F,F`# 1t$K$FFJwrO<3W,ew4@),:yڴR2V dV=F@ uG>25 mD\~D"^<[9_~ㆍ? +dYfr &-~ɡYݱ('O9sTnV4䂊Uiq]RKhr7vDѽK};䖒[#LhrssqR[F^y6q*Ե5*ZJaq+W ߯~ݍ{FJ ,Hoڼe舺!vSa YH@nܹ Ul]BhG I  gw#vD@Z%(lNG>T24qJ;dpf $ "ԻKɽ%Vq {8iu*^@F` =0TP4 I f xI&! t  RU/Cq$0&jjF~Cg'wqbF羻DPd*@ڼi6!T mǬl()O`v )Y͢Hmg΅+X] c>;7:%eHy|[IH2ջ~:̾нipR(gx E,ىI)@b-b$n x߲v M]bXH-ϹaJ) @3$2Uk֊N٫H51d} }KT}Geนc^eGI][% j男Sݥi 2K򫞌O"/9eQzQKVE7,t4@¶&jhӨiS׫qӨeM8hT؋i4h4a@%>w{K !a F464rr+n3#ǝ:y/篋gni+/:1oan>!蠔"="hRѨdB4y8KMc]w6 +vN# FgF-$&gnHQ^y󂅋_0KO+j~J %&m!Y$*)ǤAO?o4T4P gZ=$[Z? O>Uh"ç]*^ґS˅FiEٕ=,4J-jˬxF ea QhRV74WTd񀩥-#o'*(kH)3+gG= miH;$2ICK_IE`ނF)٥ ur@#iCD;Gk+cvEɾT8 Uum[m=8=kG2Qr;fR;$2҃>u+u28HJE}F(fIeFFp D= ٹ;#CswG⦱Ff`\u o]#5TKLf_uıX7m ' }?s;{zDQVA^8WG`p={/=dF;{ ~Rb: |E'/tpe_F #֙某O>Kǐa1iK.#s7mqpA|+Ȋ*A!Gw?πp8$%CM5kݼݘ>(V ߠ爫^WwAho.IyecojNK +Z࿱zk{wUuK7,0&&HdKnl^p߱i,ѬYQc]/CGpo QFI;.1؀e H~XđFYU}_9:ȺƎ,%;f3JvdxCB"Y"498Q!KJ (h=OԶ<X%  2BP)'P,5?jhL``X#$'Z SMgSs We_7çP{EË! MCLC#5M4v=yr{opq}bRLUwIߣ7m "+man'.%c`*;5t";v=pL^YT ;G@CݸGHIj#RҊS g֙Ҩg/ݡ7|ўY;6Z~̅G`g/ߕSRWکʰ^#hሤB pq n↧N\S9v*}r񎌢=;t<Hɫ^eKBh^DknZ =DQMW\FYgyLF59tDࡡGBV]GD *zKR~.'X*0Xƍ m;$ܼf4:vE)*(AC`ن#P]{\ƍΜo :rɾ$E*+턗F)1D,%wR#DCWWS$" R?U4 ?.򗿐?C8xn޺EoiaMMX# M }F*wH.{`OC@.k @sH Yۻ Nc}OsJYUԴ &}̝u8&  M>cFzWg`ԉ3W Å'SOЌzp2-=c],YT\]73D ȇry? #. 
N8vGHJ^8.n~}y H!N;rGHRn-uw }>u4yk܃Wi 7Wut374\m?l@@۔)9U=(1R22؇Dʧ@z)/,YONd+/ghL&@b@CB\՝D}CGF7?}~HLŧZBV Yb9G]c$0H?;u,M/,\hhR솨dw`$sgEH- E&f;\#,1;Kl@Jͩ@~yUu7$ hG6N觖 qyƑb2L qU}`d:?k6OP ԤRg;oܳQЉJʅ A@XW񲴾਴ݠE%G$R+4.șK'ݨya゚Td%*[Ї'%q펞# Rr*'7*zap^M_n y#MBbH߳ rҊW艹C@zYZR`t7=F/-eva qRE]ޙW]_\#$@@ I ?Y^tT4t/Y TZNL]!ǍQ)@ 9u;$4DzHd2 US>^HM]ΝBR HOoZcK{ӣjA# |QX⽨cso?U4H/oz;JN4:ѤR;H!̪4xvF~p%b2P]{̑+Fˆt $g:~YrNt#h4:pF#6;" q-;LL!TiyB3mH6.ZV 5 $zHPVogK*s:ĢKlմH eW$ $.R!Y$cH@@ HwlYUŤHeUb )4PߺP, ;{!-mW2ًO;5 1%eO,':H!/ Hک܁dpұOd S HYT5 H,Pߋ 4ʬx*uKS- :$,,Z2#PnT1Esz'ZYV6=kF kު TZYFu-ܕVN,!civI#9Dnvg.aM*M4:P^^^O H i!54521'nzO*j `:Śί^#$F_T;R+8h"ss4Bi#N#'8:Ʒ3eҨ ˛]\}HpA;7:r~c8Vbja@T1A 1fv = Z:@H)F4ru BV޳NR^n4 9zF6n!@RRC@RekJ) $Fz )=Q&֩SLjI9:ç*[1&'HݼdH Av~ t Wv *@v/)˒# =p@u  Y+V Vܼo^ }ye>Z &fFҨ%唷lg镅4GФrp'Q4<"!]BRViG8݇nyWQV\#NLR.r ;HTV ƕIYr+V"TśPng|Hda;u4Q1"%YG)m*CJ;@Rܩ#(^hFHlV $-hP\u+S\U+FHFX5'XE-T&Ɛ"!%"QDzsS׫VUGd:]"u/["TXYAL-2W唷uQ^UwVi[g۪`;Ӌ[:)c_u[YS0洒NA#q\uTH-a ! aa}%48 qߘ:gv莟\|h g c׬(2tޯ=o>Z"{ֶ>myj~4nkB *:_w'05{0&&Hos+l$KT6U4$qsRd(;>w (4@b(4cFql^F`'$]idSPLF1!sR48{{* wCH޽研"R "‘dLIE].VhRU5>: F0H 57ǐQ 4*|JШKnM{NiӂЊË;}a6qm}jih[1&5!@H1hT܇ʽdnQu}`MQNeWqODZq;1I#$ewAao* 2ʟp)a 3+2;x,# ĈJW"dV4rCaiM͔TmiTV$7,EĹw}GSȀE:uə%,4mIzIHƤ0꒳Jc #: |ߵvDI'1k_Sv!): J5wH^Y  ,l ͡=y ވ\t8$ {/\Yi4yh4!@lbެ:v*b5m M,+q:.AxԿXw#91;c.֡augζr:"D 3e(3៘17B\ /eH&qRKťFF $nuw bޔbXbշQh4X$v!eQzҋ? lـD-ӍR(@D# $9uH`8w{?Jܦm=Q PR7 ْ].|;@"EeGEqi Tcbx9@ gBx? ~&L8&C(" dO67$ MmCLCb(bބC_q_IJa]f><2m10 a|CHF4*k~L^Iܕ;@#$NGPۥ/{!]K}ݥgZRBvKT64=VPCxk2tGVI]qwpKie4tv{$9XufA &Um\u}OU>z*ep[FPsOthroْ@ۓSՃht Ka ՌCŝ2JFQ@iMS5U|ǑHޔ)4H?$}=r=v F~VA5ԎQDb+'8e oY T ApFAH:CCGp3؁ t e~z >``XuNG zNM]LCFVU'ᇕG T x p7JOJ%Dڮ7u`e8& M>HTo 3 {gSOg/FzA} Sn>Kno8 't;֮ 7ȣ/-wA2G2ɹ *2 ;]|"}yO΄Si5-9ڷڷջA|oX:×-)%naO}{p)V X:j4ʍ頊'H;vHW:smf.,YO2>{ ;Y I+˨tT 4pxiQWE&%R)4.uI@o:lj" E}3.ްrp "/ʁmap[WIhЈ@6˼Qȏeތ+W* ._!q75l%m,|c ]*H,.&s7tFھYifD?) 
E$^GفFھ9.ɀtʩϬ$TPIAMĉ ( 4ʯ{۷GXI(|mC,[>FC,sC~mE ]f/0&]oT:R Uk)-e34Q4/&q$Z,2y0p-m@x47P~UT>1'z%I{W˚ji& $Mʼ8*׶=i']uTyZ[vOETx}x꫎p~_^t*&U4Ca]a@nUNp톸용+%4OE^ڕS}"(!BUG)ŝ4D;i}:4ީ{uS]} pw7Py&LE0}:n`YhOi&ѨgMHF𭴵9MO#i%FH÷cɇD4+&]+@bһO`ecc#+b#cSH<긓G@Im*,B3i| MSyӻKѴ.F i^wG1 FlyOH,e,/B(~F3FRHDnjYW;]qã%ïYS_8Zusx(F4itFIU=[>lzRUCy%5e5\L$5-=i9CGOT5[ڑ*nq)}㸔r,VNҶiăF~h?r%t;tͷ|y'!h{dԙKw1J$Qi_OiC2A !QI[7l )h*Fx/\8)Fn~ o^#@J!TW^߅dOF7!Ե ÓH˔[ld KG@N">ܥ~%PL-h^Q5-} ނdnZ#8! r]X݉ a,X(&9|BF '0%DT-E_̙#~+Xh2BᆑI4FJi:MFT shF&v5>'X@c|BFY(=}C/\/l u&QT4QY@BAI44䆍[|P~ #OFyeM+&9jkus?{qS 9Hsx\fhL:Kn>RXl& $u9a >}uMHĜ:kGHMAH/i7H|:xCGN^!!\zԴ H̡י ߸( qy㽎\|#._>ϛ $g_BͲ'bzUYIJh|ZHeHy ’>|v|n#g .qHcYb{(wy.@#Adޜ 5W )Oe8mzAO"2,[:K) *2=sЉhDA4zo4>MX75joX۸yBnXyq$C2+@Qg! $44jiy4ij{iT)j@G\iT಴*Mj:G>2A ѻK{w)GSԧ^h8MKPһ\+!k3k걑)$(r`tMJ#wfaujΝM|ѷs'󈍾+fDKB(ǂ?2H.F4QOG Qc#bld{^+6"?IS#t_yFѷRQY}{$=uH@h? }$CG} Ajlcw!D̊U7dUT4CcP7LC0<.ȉjf<|\JV?W%d]2m*ʚ1igdep蘤+n1 w?EM={okg@zs@JAXNy;HLM2Jv^@ !$UU`F4;f"?m}cr\8YmoF{~iŭo0tdR:eGe ,{WD:xw('$1w:ƆH_~:ߠ(FpU\GgDG@*}Z$ٺFAhnu;vzXR %"kpO&Jd>w72 [O'pg\%ʁAymxW|"R UH)Z@d\]m3WGHmlFH2XY~C$Eu5 z5'kDB-]._uT?2*" v^Q/ I4΁gh悖!RPDRHT2 )6%f/}! 
1 +XD2c!5WUSoHl4O/?C/{R A "6õ[~@M-5>En $qѤ@jA@r$O&a,@%T5$ǍFQ ~ Hж<@{E.[C7;>tmQqT޶UR7h07eGky^!D(2{w=C%QO]mӒAHޠ:ʆ{2~~`,x)%IE3p]tZ|hI#t/ݲ8 =тAk ;o1n&Y垴z64)+H Q i՚u!iHκVk7n@ /YUU0h ̩Zo\RIC "!@ABKM^ru@']/B i:^B`lnic焓܉0@4.,.s) 9unH\b3Cөe RhLvrvixfiQTƏm،؁@,@jy|a4J-CD`d3"eqk:xj{-$fD 7HP2;7a!!J#}.A !sڴ; )9H ں?(* uo$HXMEH!Q-o0o5۸B@N17H,RDP\n7jo'#>B\ iq17h^Q$[Q?դ[T|Tԍ8l],@ B@$D&NP38te#CN^_|1g[e0dDe 0ڵW E@RTٴem?n>j yBFysJ:T Se@>-@vB J {3S'PD<1^1 ]+ TRec(eH)I%@h&{xe-YgQ2ƦgJVP7A@8 y%{$)V udR;Dɕ@X yFBV^U_Y)Fxf]pb @d+)2!SL{ꦃF HӿH!Jϼ ~Peqs %6齡p*{$鮽Q{/ 2#**!n$4߱u[}ꫯ/;Cue.Z, m^u?ٖ-OlH^EGe7;D{΁KX|-{Ťǁ3H+&ODa0 a$iEmy-yc@WXUBk7l%,W=w(.;ƞBޑY4ހF*ycr"*ly4"c$eeM'2W7>x3EP~UQ=UGC yMSQwfVsTeW :J)N,hF@#HÛѧC#>] j.s҈HBX.X֡#D#HVHrwΙ.F4fG @6"y=uHSWNS4@yFԹݐ _Z{~ qE#>c]#]"a;rkdU/x}tЈ :$~FNQMYM+*1#cuwb6IqtR.5k"Qz+7G8ޒ2EEwI~)M]HRVPW1== !*"2* 72,$6j%q0ˆ3FGN$VCkodip踤;#٩W%^2 Ki9R*F;&+j4u㞽z#*<cDzQ2K(ZyqdVHhho }"CGSy[:eFSPl= 鍇tOvolLF'/\vֻAHEi qX`Q୨ Jdf"4nݣ&g齃cKk;F 5~a .K+{lHH H.>Q!72~W;V²]{E4M֤?,_oZ-DQw" Pԥd r&`IipR_M/4Z8q6@ aUTu`Q〤b NT,!Gl7\qO8 - WЬǨP6>h v AB|#OoԞ{mI' ĥF 97&gy{ryS5 =nLΣ+@؜ƅ /?&?-tΜ%ǝ"ŝ>p72LNH@Gwvq|" k̍"l&{]vY5mIw9 _S>cNʣv١nĬ2ڝQBD|jHwDb.n{{ˮ)dHnAȱ ց!)Ѐu>A B*ƣ0|AԀdHzV4|d# sFQYde0jXܶ8_oL,%.c^-?}FEd C孏#7wP)EU-äFxQieɥ,#㦵~W8|dE% L14MS4 L4\Ut-@X#Ȅ D%.;bqyq H3R GN]V?ʱn~̝Pg@5 FJFڇPg݂EKK0i Qƍ8 $[ZV $n qƓAq3${{c 7LXTka a÷ihr%`,oY T E6P'B;t<7"rLC-+ iqS`$Hoh[> H UP bʌH*;4qV /f6?BO]YyZ:1Ba+k)k<`ԥV;£incz AaІ;~va@#I{cg# ]%Ҽ#ehBm}2vpdRiZT~uђ)@&(95u߹"_-0h Qg1px+87B@"+gxSLYf1z HPyҊH3T}O a\E])&0|V$_+pHxO RK(g1L RU0j=T4 x)4|W?F*AHω%!hX9ԭ\}#._1QuqcƩ AqYqKI u"=e7VXw?v݆-;,8$}8 ʿ~MIctKVT\tωbR>:P:h:ۋyw̺_q>~|ۏ?3C^Qol.cᢥBZ'1h QWrxw ƓiWֈ9H{P@iS{'@hCzo9,6zo\Cc2XxiO / PB7 $BݍEM 0r?uLv 5sRCS90&N(`u #vu7pmTʼnk,l\K^L߼8$j>0fzR4nh=I {L4|# A5/! 
ў}oSPSa>0j; RdT eR>i H/0p !ޢ|Qq*&Wr2R0@#@{Vv/a g5|4z@z/O'bC19q >o 1 4>07ácy̬Kmم C=ϕ4*BQ AH@Xx)˪#H,@&n-iUVnykJkcE5HXzyqYU$9U=0k>q /3Bov3Ps<\ XcQcpm 򆞩|nlzff%약iT\ ߡaI}?n%mL+s$}fھ!Qmã(ިa0f>С#\tr;Fz~+f>oЈ@z?i:e/4?O|Vwdz;vnxp="6l/ @ JDg~ܱVOMj8=7>}6OLw q0 A $BR,[u?^q 5N$VgÎ}(<ʮy0ooPNU?z):A`ͺ[7 Q\^+\Q5#H3Fͽ#{ښJiW 졣o >PM]8yTvUGeݹ5SQwdWݟtD7gEКqO]zy_TF}N"Q1'yft7*h ѱMOF;٘m£&ZZº Ai,=_KX~L#H@Ҩ)$4jz9FHh*4kuiTM"zv>M!F3FpH ѻKSMdw X\D#Hdv涍1FfboYSc#8AhӈO@PvVD]7h 詣cO X#i`D{x F,|EcxFT3P~U%˚z_剳wU1~y7MN#~iA-{wZzw]b&v@@(khro_ee tZbo#UFAUQU+"! i ohƁDi~޵;LѴiͱQޘCGo?ϛE.[{dd?Ā4A9f&Ʀ=Cdޔ&-&o^ƧkkX@}nfm3r;q7@jcRFQ~mlYi\s(Q݃&A(노)ML4ׯ_86&֡#>lLa eIX\^G h]]]OOӧO?q Al9 e\ {Q}pu uܷEY}x3LI9ռ@g/:y*MNip</n1)T=tD:JhR`Ujjjiiiccc?,>>O7^n۾.,>ƅ t4f8F ?Ǐ_pAPP>|,[BK[hT\\\]]W^_r 飴ZظTFx4 U? @R[[[^^^RRL}J2Qsss__t~@4>4O'PIpShMI--- uQK@ISSS{{;u3Hh=xɍF_LF;_||4hy8aFkҨsu hT8niTT?Pqh4υFu孏iTP{exfhځ_B088C/1CCCmhhG3HzK]3i|c_X QMLj -ٰ;pQqfd;;= 3Fs'4=6wLM;t;Ͽo/_2=xً/E|oG3HCU p$j$cnć؈[O8^F9틗,U?68|"0:y芹ݬYrkQT4ڵW~DR! 2hOaO7 Uf$$`pkhYI1!*%i44ڶGFmHLOe"Ƥ+: ک@j/ʞ[wV ޽WTS(ɩWejnb!,#ǕԴ/䔽Q-}\&)x9_H ;FTWwX\6'%:)Fޘ@ށ|tť ˯@432 ΀3 t񺥬:H$ḙ\1tĥV#Dy-ҩ SeۏL$h2POam-Z,!t 52=L# Cmnzuq 2bUKb Y:: hnQ H.QVP M 4P?˿>&;<* QDյ lH.n$R ! ,w=zoPLuˠ#4;hʳK;H=}cM[]3G^Iw@N>HdF1Bm_ō+?@&Kll?scB(Έ I8}jA mw!Ҟ1Aqg/@ZqO8 {Eu(6B7ظ1"^!,4*|찄|Qny'<Χ@wJ` *iv݊GlXqW(  AVU4jR8=6^a?~UH֬-_0UUr eFewHp1#DQraz;Sؘmܼs/@r Xq=;ws@NI %ߍ#(0QP=rD WSGxC`dr_ct-/kڇ "rY׃UQS'ZYS ۦyeCHpð$'`%fWAVP t -#Yykևf# 9x./$RZ(<<0!*/LNCTWH"So|# U $Hm#uV gFe! ݱB=u>7}GRTZ[v'w }|L}ބ{H{jA 囖hr "S.ፀo~{QV dq#.`},*SRG42=uaR2/Hƍ\e'/!=ynx@RN\Za($$02 9{c/ odn媦mhdre+K oB"4 $DFj;9u U? d}xrZznsPT= 2;d Qqs-pC IFͬPdgYh JpN#HA Bk7 HhP鶕ӢK<<I ƬXdv! 
sRD Ť<{F Ux}CNY+]#,L.pú'N` H=1Ďl8)"1hT فYr4W#,^| @J̩A2= bP:hhr, iIICeܨj, 4*m~B ?ۺ0@p1HK!F4>>< 'lQI& @j2 =x_~ǐrJISo}7F@yޣS7)cҁͽπF m GDrSs6\F z|HxJw} ٻ%uhND Rm3 n$D %u7P^Up^ʖaFD `>xDK&ϩ3 Z-hS $Zrʈ;IAUAHY@]HY`dTw@`@q7uИ/= s5BYEuH+,=9H 1QTf}~itrH@$b~ Yv; %k(ە/u u9Ng ]wIBCPRnv A ZB`;ڱwm@z _Rnn*pHA)m-~`Q^U7 tugx@Jap#/Xqn@vy0-7ú [2K;0\@konW_}}Ө}$S1𲼱ua⼆70YZ [׶Fխ&5Vc\Ayr^ T8u*fܪfGEϮ Q1 $Hzc-SglKy#N=u҈}LһQS, OiD[S#h }h ѱMOFM4>> M?4F}/چ{}|4*iDތFm$Өn(QnUq#vT5>z@p4r(͝784 v|>AJ#܊>FhFMN]\>br?K;8 #,Z~w^a~iw}3w #gxF'{%F|;/ ho Q0~һ؄'7OGO|{> |Ii^Һh2QY#= .޴ˬYqYՈ=@{DkC  0Oz۞Ɂ2$@zH@HC@YV>i QhtZAYm+'OSH+kWah_J3:sy覕m[o*&7JVY[TB^49}HLR;7 9EJ)҈@?nݺqj?}'$ Ce!q̰_ IIxyӂ0s6! A~㦭,\Y 'v'0cŌNZ#ƍ Kī_7l`"6r, 'QlDmtD `Yf1Dscg=e *_575@H&E6~_|1c?Z#7={o7Bםp< EOΈ.kaR@T8$Nkd,\|"O&JF ht!Rnhf_oRݎ^H͌m=|=Nr qõ}#C?fQnUgB> CE[=": ǭݰ=n.~!HI@C0 ˢKP%d1y@r ݽO ݴt! !o7Q7D4ºniq篩h@󺆦F'n(!w@! T$w"F\T#lzGKEHc ٹ!Ӷ?/) W\J~^Q q!F1)nڲMWҙH] HB_}tx7g {ː@$&.u; 03oxHMk|R;|MW +&&A'ilw@T+{6ln>{ DH=b?aؠ#]"h+sOSVԲZp~"ew};w^Y0Tur]g1g [ $4n4F?> Na $Y% m$ $0"{6n#"W_vӞ7HGNogϞr MO0{^={-3ꩫl|덪ۆᅪyAu۹*[z޲KN(f '5Jn{֬U3u3*yfܚv3P~C"덲*J&Hp Htz'@Z[[322@z{O];a:fTz M7<HɞIi4@=ӀPԕV1_E hww'O@4x{K4>ekThoYG|@bW+h ѱMFEF9-4h }@b'P}p[Qm#bN#oL'-ހFE Fu-U:4ak6o[:hIM/2ڇTuLv L04m~#-%5$\^Ni_s$|"dT~|.[MkV po]xCq9uxi xAHJ5 $!uћFկnl3"c#oZ 7mna;PS}.8coh2D0mlyȸnu+Nb?D6~]vv m -;esĤtp^z-n!/-_ܲ} x1:w.a7wB4nA̤VSy#\bC\d7\%7-vx#\gCrue d:N8=N*!x9QRNUbv ,1<b̭]ncy{I40*Co((] H("(ٻ  ػOy=ubrXlڅ{fql([&n5_z 384/fmy[3j $*Cb2~q ^aVOF')f#rQ̰DW8N c_lN]Zp^Q(v~DGشb`uA@13S콿Bt?v&7p()ctw89 #ջaL>?"]v]"_;GrKb©,#U4 ~Ã'1$cR h{UP*u88N7m VӐ*-GY%F4xS``?VgdܰB<orP$^%dUtqGYt7nkjqiIOk} oL"(>  =w3j} ox6']%ƍ9@="[٠:"" Hl T>q34tɋfrQFX(&ō\"v!/"-[ ]StNR~' &-) ~ 1wtč#x,@bkA ض[lՏEUM˪d^ ..5ӈ},8F۾s? 
.z-_>{%Q-[f͆-pǙpxj@۸u±9ͣ#a;ضC䠄H_!1oR7W bܤ,fXłz#m7'3b'mݾ+H]PHSc@0 $Y 9eXWW[E?(+Իq ,8HA qԻNWnKdp6:tBQM^/I9u:&$b$LD7R2 ChLFptל"bz6݉z a[p-XPT\ΠiX5ȗz7{j\Cԩ|V؈խ۸ջpk{Dy5~l1H9urZϐjBݒy | 7_A1ypl݃qfV}4;O\ܣB4& H0b8j;w۴yW !(>h:xܬYڹXIm78HVviCcҧϘ!*)kUk2 HM[$@j$-=vR ok'_/[ZT@CaqقKWl߱{.[x RU?Bfw!4tMJqS]G_Dxse )Û¦O!"&nfQI9@ LO1%".&Q"6oފmgv2[jM[̜9篙F4>y%\uҎF_wM/H $U67v ިc )nT_9zļ-F%UQ*lyz"#3׮ߔUԕ5?*io@/߲8w ߩZ!9n@/Ӌ[G [SX2PqBz슞7DHS HNݗө ˱qeAٲx/4֩3h уM#Z5 $z@@iD{Qa# $z@Ө9ʬj^WG4>walջ޼Ի@ H߃o7B*No{ w@d*NS,E%eѐSA4:9"$L;:s|}#L;8z7ԻQ7O}g;:FO܊EEAC'Cc*_ r" U>xg@'3'8G}уÛsЈ=;uGħ'ķK?Շ #ˡ_/ac2^?x3b2KlQƣ4^4^=xw4>u2D]z RkතO]#|omS65xu9E5j0VrKع3,S6ZLUT84zVNI]IM'"!  =Լ*Cb#~w=qCP7%CcaqYpEY]V*Z0uꆄ<*p=79c!+랺hihvNm;gJ.L'n[8BȖHBݘKc7rܱk:um=4uMȉ t@$؆'  6 oJک=pee_čW߈VGcz8on#ԻF=i4@ћ*t1t3#h>C㗭XۃKZ`!eue76lR\&2$8e+>)!g u<([u]- EXop.ue flWD7jPZj_7=z҆궁*VhQQ]_Q̛+?P<wfYzF.>5u9 F-Ym{"㳍,$ݶ' `>?9B}h :uusl]S7B_X2S7a4 W:b hTYHm:QUc>@3g^PT t/Z,]x="h߬'_-"oJ ~ݹCG@š/:DӈVM~h4 @ru:c9EuCu-+]e)6::xTFa-Etr 9E5kG/Qoش70 #`I,!ªNXqdF~28[|6V^ΈdF/]1iDy|@,{.-p}Vw+srʚ<1׬z!'cBl݃R.l&M[|"h ҨiT1ӨUm+D獽oxѨ9ʩ(*Zw oMhhAo&]"_p% 9ӦMzʬF@JϯJͭ(B;ufGNugΜyQ)3}ƌVjHeHu౳jچ|BGkظ-`x0.DoٍҧFD720= =3"o4T`4 ćFy\5ad..%Osx]Fb䃢3iMQM(| Qy1$yD Eg! 
w7?~Sxq x[.C@\~׸:"&0m HQhƒ{th[v4a>~ae 2JswgFSgaܴkz5Lށ;u@1Աt hTRNHwFu||#K 㲥Ww#]/vZ4(&HF#Hakn4 xUg;p,[/$hQ[H@B)8I|0G-m +H )Z"Fh/_)HmsΏ(6@$ 9ln5ؖwg.ݐWx53?_KwG%boVPŋF协=-zh`@ H'O& t@Fq6r$ GM>CTBf+V aJ>esIj Dyg85 CxDL^鎼˾a5[MH ]U?&Dm@K;^D=1Ш}pXc#(}L4&@Mau7)mkeQjxS[:M)J ԑ3w^xL䆎8G#Cӣ0Ʃ1#bBG&GN[ٸ9gy믻u0vS$4>5L'aHu̎+oi4Ei4A@*%FumO&7t4*o'm֍ܣoLN:T(mbh3RKGm%N9sE5vFNQ򮃋W 5Tusy9֝&~w+$XhRjRrH4rua oH{OIZUS_JNN|00äᢒ.Z,.[+k¸q;We/ feh39ʀ+j:0OAIR 7m=+Q  fTV&0 $ZÛwBjLo8sm̰873f(iۻ'YɃ#'ؐ~r=m^M]q1 *8*?bqqu#fx]?o)M;v;z + ?ahHΩ%L ixC't 㓿MISH+ӧl}_w/5=zn02R9l|q@$9"R*߈J{`Ny;HHPorThhv #GN^\,LRFQJNy="W˻E%B:49BAܡRt2[;( * 7fduƒ:wº`!hV[Y{lގ+wUtsL1@ĝ:QiCWS4*za.|@t̐7H:&ĝ:/i4Ei_?0@$>{RRf1Ш-ک[z R@xe8T5HKDėUM" eQuZX0A!,")@Whʋ$X #@RDCL)%$T -]#Mr :h>o`kdrtmlt4q~HHF>A1͏ޞ}_zنԼtndfn2 w=y`ⲌGIУB`wBBݞݯO^~@Nܕ۸UA>Sfaw`; +<3 ͂+.XYԏ_uLHq#YbvMH ]b@@lRp &RǓ?r WcHGEOF \%ƞ3g͎Jbl fGs-" ʺ 68&TI MEH]-XhTXh) *T 5&Ǥ9@b@|ip6܊u6!-ChKɫsgF9HR9zN1CAU뾓/[օAڲCjߞL L;,ob!>Ax|f(8$xFᭀ"{gȫhuZf=,kOb8ɨdϻEKπVOx_ Ј A#ym@FSF/*4^ɇ؉ H0(cOTRZNi>q$q WBJNNQ9pҋV .Κ5{GbW~^Vi6rĤyN_5"! ʞ}2Dĸ < .];vl: @B81}Ҋ;qW`=bx#Xw~3 ℑk m߱'&sVyOs6g/ $7 pOGqe@ja P4܈ͫo8q]p1ک9dNn; gťPs=8(Sb-w@;Fx  Y \dr4<44 } Aܪj۞6#{O0SW1PdAM|DPI}_UH7?hQAҦ֬Q↧im;Wg9|ͣg&0 $P`p=B^ 8QPRhl2x,_RBhח=B^ 8WMR+64! 4SG*44, M`߶?妃?Gkoly9au}w:4cs;`&gꟑ&0 $*P+C@0 >2yidd=$dN@ @ZoDR=/ q2._ $R H 8I4"&HS1$<}C~IhQ `]#75ƠJ4x (HGR3/ =2z_ z)(:# *zr_$nu*$o$j+H5ؽX Gh hR]vhbK4r/N%F4;Z< 2yM =#;=U4R" Y;3ڷ+-k2<dF:F0ʶAFSF﫻D]@k߻DYӚW(l,kz1^4zX_A4&^u`gJȪ2˷,^i4Ei4I@=%2">O+/:j]::VU}ǞY޹G SPLn에ECRVnE _RF++и-}Nѳ TkLN/:OlL {/ <U[ḪFSZBFYW G@>%LucpgƢ9 4ڶci@/t /A5탴j*MFyu4tdzxlw簰]"nܱWPPe% :f'cޝd)#r[ػ {ZPg_mq YP7K5tpђ |@5i}740 MhwO  { { {#E9% ŧP<#$+>e/v=ju>ѾV0 k> Lqg_0AWY;78W lx&/(G,"6 OL;EX\ջpʭzy@Uo퐰w۝{I=iP='.iJ-#}]FEӐƲJZw)G֗0>F%O;^}*TFmM/Ө1MIiKp =lac.ͺ[4 FYEV7ݱuSQE@ Au=yYsR6@ GY9F]Oje/_k+2K#MA=ZJzVq@/9z=T9xUt73>?Q?-4~3~>7Oy$򢑍k@FI8MO[wG4bFxÆ$RbFL]kW U;u @U)2!A#7tnDA#CBtY xGy*Ha  ̎>G*AD!UoF CT*Aê2H;*AW4✗W qeg󓗩>qMӗo˫󍰌OS! H=S4ʫa N_.XkGOs( g1M DiOd@@? 
H0LȀ)> "_H:@,.9Hػ-R4,=!ao68@RTQT+^zęKt#{+ZJxiuM阢٫elt $^@2236QPX8GLmk-=Jd6l~9`6WleUuBc4be#-ܴO 5 nD"eTCb3LOI˫^~rH5GI&Gc MF0?g;8lܑ5<Qg|4(:K“;.$"&cWNozA4"Գh4WX u=1^HE%9[;#QFa=HFi\vqaol_|CgTz|L- H)`$wU+0 0w4>'NWy h>ǤϚo粖z4Yip''H4{1o2H<FJh'l*$\7>1)-FX;𣌢b8?s'06]^E >o¨|D#@}+cROh Sֆ"A$g0`Db>i^1U-Cx $0'e }R܈{`*i%nu7F"A.x| F o'5bڰ7|30>".8ҋZ;ހ3H4&F2tH##1-?k{܀ HjZ[ٺ}džM[J{ aY9km[wHoNjb {_a/6,W e1 ‚Kou]HR $OwmS%|EKS) ^\s43K~ynq $>@sc" q(-L%>蚎Ad*x@V7 o3 9D4*|8@#;wݲ'. 槡gfqn8rSx,]MdH:xaDrB=iM &H՘sF)1az%XA됰*M`c3uψ% $b6pw D .[Klŀ;0Vp j;HH^bVYە q{.4*̛Y(nS>Qi鲕  iWIi4)48 }3%F}9MnwO1NZHĜ:`!2-=Ss I]׫? '2uN߀P::N`dHsÿk c]8a8V9k6>_ $ZM/:>@R:rZhĜ:@'Ie R7$m.@ڱkgMIQ$i»KL0>W H%x5>ii 3=oپ,itc%_p~ ill]ΛowN)XzFbnxw``E6.H-^F%\vVQE1"\BZ]RPRpyokW&Hoy))O4E~@xso\+s(n:#µ ͑fc@-kyIhRh4A@KPTw(m|Zï*~uEO*8G6?C~h ꍐ3T CcﻂW[QUj{+[^7*kzVThZ5*:2Ph|?S|،r@._xi@"V6mbe:uA1[#`rlQsFE:"l֑\cМT?ͺ7@n>|G`> [.Zԝzԕvjh=4FioAewjeN~4ʭ0 M7n+iøc4 LR6W15?QRۍhd|Ȃgz}/>%BcU4dTu 敷! :{@KTev:><胁ٱ,Ǖurzv=V-/"I9U YUOU<0߶im.}|@V1IYab_V6fFed8{]iT@`VZ $.@5i}4( MpHBJ> <1(27ਔWn\ްiO`4 rTȡx"S\CKೞL ,^el\^3VB+ *w AUjyFX Z>2)-C= Q*KOJUTbWuFkM[CR"qvfGphqAA{ Hd]D_U>oy34*k~Vz4*E}ǃF4e =.E @qQNU_a 줲eW>*hxNQVţϿ־8 M䙅?4t$!v}̃|$@ڬ3;rJĜf]`d P'Qn '" K L;17-(ɹU jygŦ츕p 5xa|yՑ(B<'-C0¶)=m $4 $l:O4Ƭ_96hhtfc!|9u|#AM"PPȀWO^w M>)і6>5[osݘqo}BRH|?фi5& `ܳ:8}i2)Y%ypw7}/m}3^#-}Sn+6(n]{O-=N%#bv]" H1hMZ-'-ԚDNa<9+U+?{׮>aJ}h$W6& f_|SIu$ [=2֨7$-!g kt52k'?;8 IPЇgÃs+:xiQD*Jɫhe7" ]|\CVw񆍴v +[|fE\F5ndmp耜]_78?di{ (oz쀬%ƒq+ =c\wꄷjα d 5NO#m#tc2";X[zv$@3#V^UWLJQh&Hwťoxq+$eU5BR&HS꨽[Dg#N=*i~vN_R ux+9|H]-A~Z8XX^UD0"j@a2C|j`lkmr ǁ-#iםlCK ^H "ciCBpS|wz5uM 2y0’N{ 7 Qrn58nHO"b܊Nf 3rʚRr*`u^/ cSRףf1-\7/jw8L·أi5YМĿ|"`&%tc6G3J~r:FDbM{uR @ͫy t)DIW:VU67v UGU-O,Th$JZK*X 4' <^wVG/06?)R3_pP@RuKtc @J.V"POM5|7oq%"x3Q;3nF_o-w&# v񒥸A*pR8? 
.[" ︸n-*˲JZxvSx)cJ$ 4ʯ{vꖭX/ (dXpL,w ^4(C}׫%H.aiϡ8J' bKyG4Dͩ7Ȧ# fawFn6VEǢ^+HAX{ / UtVDuu)hU+^%ր1F*kpn{8*^_%V+HXP;6j+NZ< jp$V{T sD6 OXwH 5 $0XIzI'$ב@ I a%GM@ _IAMIYH] $O~sp4LB4P %]%/]q.__*)!%K|.ڱip-ZGDĤ@v)mz cq9\X#P"iiL)ojV-Eu ZM*I/i%Mh~INI mv&3V!4]/\UPՍzP7Ċ3س $2h4hnLŌڹ{)t 4e:Oȿ -`&u*-htz# ^ HHL*/ /%x#Б IHU5WDOW5C/Y*. !4g|W$0/\GBZ8@ KE!!-/xipb4bRIMF՘h`;o .[AMdM-wBy+V QÖI%-1^ૉ>7 aEdCQUKp,_)H<W '),!hTE{ մ4i(khL>o*}23gξzCT)@ fi@tR>gξ| wMS h4h# HprR]] W:&P_OQuۋǜ£VU<+#..u]+FSuTX9DRQ^UOQӏ2<ϩ:Vu]ٗ^E9`XZZiD{ Q)%҆^E4?)t%>MnQ"h4u""H?o߾_$dY_4iԁo$z@a`q]J9m]TC)QV8Y]by-׿˗/ ?A!t^VVVyyy}}}gg?||d %csJ φݻgϞuuu555UWW#LڲRo{nhhhooׯ3LxШM[ߛq 5E7j:aowҦ(nl%w~uIU}E/H4ʩ+lxg;r7oI===0W[ZZLA& ɓ'0{_7& Sc0lֹwHFfGa|Rw goG`^eGCGvH:)#^UG|6ىJh= 9I0O,c}Ƿr0l0x׿ogBGD q ͙;/$7>)t4gμ~4YG5t Kh"9 auϤH@rYGp $I׿Ͽ>|2} }Ƿr߿0D#iE>37 df~vFNQM!7D$iH˫>;Vidb.p(5&%D~#eT/[Z @ne4uMQ%,p貥vHL&IЉJ!}AљfRr*n VUs$eN_ukiy5y퀨 V1IYq i*m@4e}ב1 @rUPV1>RL=u2V6T rORVUZQ;4I','?}|ǟ ~443aM~FzFMYɏ3g Y|*gO&Zz! "'h\h \z )% /a%]@#%5mO3rEi=5u#H?8$f7(VZ@J(Kง$ in!#,ǀlɧ/ZX%}L*& /csڀFʚ~Ia/H(&}ꂥg&MN]ӧϐSҼeΌ^f]G_9DPaH|.: #`=oWxg-DT"nzgݘqG\_By{E.[ L"vkRPVv4wBFT1ەMp 27}7q&(QY@\fOm@iӦw?$HN]׳5uf톸ԇ-;QfaiZ"W鎭NrV)\lmƀ;t Y Hw꒳+2ˈ4OǴ{ߡm}bR8`S 5#s%·FI9UAYw3I-;6H@rDu=6ܴWR׋˨ ] ~ 00;کÁT He#y[c:Lwi~6n5Gôg@O"^@>[=>y~:Fd! QBG˛C b,]Ã8N,tʼy +EadUDťcfGO1CBJvV)YEH:[6 onG @mC[v.%>QIi9<[#<{3W ۴y䮝;`70Υdu#Iip0nY"ݘs_;A$dqIqKHɃcgh^1$Lk[p V^- 7Dm6L$HC@% 7]!e }c>&HtwuA||<: wyDA ycbol5u`g֍7%Ki c|K7>H.a|h75tDN'{ Zv:r *}H䈞>4w-h-09s}Bfh|~ƌCyf8+$$<ˎkoI>>]@71>e9ɼBG:>upC\ƛ{āY'U=ޘ $>! 
ܣÁ5t7cDA_HļBwT4T5tcs%nF4g}IE'!]m'hTб3j[`@2=ʋFaIr*ƇNTunٺaj2Zz5%:99%-z}"|CT4*<[^{nDoDK7ldT3pou,'3+LDeZSqh8Vt ]f-%&ŌJ]B3*C}$MCME7m82x?V^UWL SNzL]'Aqi6^D !rOH#HADDyPVJFz @Rx*l[۾m m#JsN"(9(s9d$EEܧ9soZTE昣GUt<{ν;ΝwWF"30aq5H7[G.!1g/^@s×U]n?7O 'dblu⭵HljFR Nޑ9_D:)08ԅ[6nI95P-9xG" n[vX9[Ξ=GDRᎩkzM8D$D=#y zuܾ -^jj#: IZQKQ]ڦ#'_ݾɅ ks>[zb<…mcR$ $8p-hdQckiyHٌ6*h!Ѳ AQb&qZu{ԬȤ܎hea>A@kU}**#ܾk{[vd.2 <1JxZ+HSYWU۷_xZE{)ºnTx`uZ@7?jAasz|c 9x7Yw9I,uj"T= S ) $Z.;=}NDHMQatM7pN/yu"ܾI=@Ǩ@fNȨ{i-".Mу7#MHL-0[IMHT@n’Ȝ"F 1:v?+Xٷ[AE ho- BsvkG}DDn!C? 5@r G辙ut#FO}ܾ$Y:WsC@ZFQ :H ilGyinfh!W iU"dl?Pݾy$0p1@0dbjZ]GH7:o*\|y;{n:YEu*4 #ܾ˛ߡEk׳u!MW 3F䬣-dŪ5LwQWq n! Y }09#Y IJ^c 2^[Gn Ds+U '7BFH8pe 1]̻34n$*-QjNLn.$s*Zi~PT)pHwe{o$+87ou "iORi[x 4A1(o!7@?C+Ԧ36w^H;ҕW$ZM$BH'<}zȼy߷D呤s -_t V"ܾ79{دyH}@Vٴe';:HPYϱkA~x3 $8p,:?Rͼ)AMݥu/uT]\rPS҆7՝T9-Cu_QRf軎[GeM2 we?~=讣ʗm-(EZa YԋH)6؞%5w0(}U7)Cb~klv#Y4&ú4EWXy˰8HLugDYSA7Ʒz(Zt : Y98H#7N}?o<]bTKQ8FHܬ,KvCQn↞o.A9N7-R ۳*Eשŝ,u Ta c,IJ<[Gc4]b(:U-Ύt 9yG#kܺGJ,v[# ٺYG=3Ȍ*jm{=+D#;Xu}n>3W( 8FHqL[GuCi) ڈ G8 $QR^porYGt%"ҫ,[YޅjW螼{aIyMY4~*0p1@.ɿﭣvm} c: ]EoH4]i"$.+&4ВUNF톉A(E)sWRPOѵ?ΘqGPFI50 #5AqCJ]U ?2#i1}+L>␢ >!@û8,(@?s]I]hD$@7:? )?|A^E '7S3o#QI̞8'<- : ^MkV@T3Œf@{GJ^= @HȪRoD< ݰpɬ*ڳqG@BO}.Z5'" SՍɵ/jYީB.b[j> <{ݭ#$BAq3WIa c4neM`rn̼I '=׾A-N*im١(,8' j]xGeOA:pX \l _[v t@.<ضsc;_{p (B9mC9]t fϱv HGOʨ4@Ǩ;:;:r$y˫h# Y:17&S-hnTs#’t@z@S{_yHVIӕu@[VG$GX6xB2J t@"ͼ޷RdR3fyEӭRyᅻBH8ps Mum^mN~h̛n֑[0 $k`dMM+$$dUH,y$(*.Z?s欨*+qhŪuy&IH8p[ M[Ghe]q=aFg5N 1yCFRgտއQTj @c)| IEV7o{P(9^Cs 1oAݠ@=c37pN:^ y 8ľq#_Hb or7#v~`鉁Ҥu4t.$~! @Bbf:/28aˎ 7X;C-\sa+({T-j . bB_iJ& H9.˝NP=u ޽0I#9cA hY/ZQz8FH#[Gcb߰'y#S^3-r;uU"j_ӝ[?A% nA)WF |r8wK FH8pe M[G=Άu@~ɵ a҈"/S&s@R=TA8FHSuD.1}#1.A5e&8HcJa[GC.фK FH8p M-SpDjQ{Ve׈KdJ-~5| cir豍Gj~=kZ'Ս\hyޱ#uVMԕ{g]:?w=QZ+գؖ,۸y/A> $8p%:qLN$rڈ9GY7 @# ( T2pO~~g}NZ7.58mΜyn!$[8 Hc0MtUVT;S}؊WM9xKʫR?jUQ)'$ (RR?-}3'bQqeiy*@Qh"M$+OIYxh v UT2ͬ!Qy/P!*8t<;OB$+/8Op&id"*+(yuoGD$*\W$?#iQTLVmKOWplvpL6A?-[ZçN֮Aޑ.\m]@ꁰLIVY[YS?pG(JkHPΦnd $1E/A|'A\nˊUkM}ToG# $8pt = n9? 
OTj1peatmbo{7m7m{SK(%ňw%dv"(!1ŠsR ;X[!WX ;E:z FH8pu `u4-X /Ӱ PNxA Hx8N].Fiu$0p1@%jxKL`M# $8p&"{O(%yD? oa clKL[GcЬ a coM[Go8HL>{(}0` c 4IK u0y3u4:Q 8FHխ呂Eu4, ;1p1:@ڲukDB4uT㭣o_M-òWZݻ  }L>a0uBKAI5uc c@??~G?[GPn%vBm4Zv]BBB~~~UUUGG|b c@o߾t;;1Xj(77_|׿+ $8p H\vvvߔϟ_8礠hTSSݻ?s uH? 0e8p0 IuuuCC}|?kPѸ=8@{lŷqJ[oA_?OK1;`, J" |r34@~an>{ߤ8q, ;] Imٺ?KIʤJF8XЂzy?8G=y% N|5{i{8pY@֭۠6=qze^u\N" /1mV~o ;u8ǨwGVWN H8pL2 9F ⺺W^}y|?10p4@B S1p)rrrZ[[G I$4Nl!iN,'42Γ0p@`ciH*;@it NC h0p@@R0pFeUܣՎH>I,1r舸S !Ie8PˋgJZ,K8B'/e3RҁWP/cs[ . rnY;[ɵ}3f=$fK{`\sI]$K00p@Bay(fxoo= ][f"vV\rxe]8O?p3lK|)qܷ_j-z/8}ӭ.{ [b~5i/ x2s=#r,\ NHH8p` -o.z e10 #8UtN3$P;%Xƌ ^g?=GEtmP(k \oM}Mr·9¿&.@hj PdC@v| ԏ[|7( *T3{H|Aq9 /C Oa$} tv%? [S_s="aT$1B{ t9y` }{Fr{ ;~x$ VؼΝf,9v@ι54A\ȶe55=_p1Ak皻D̚=g!ro3" t9y` JB%2h8%@ RCyk υ#v]ôsROm@i(y_=Iͷewo#o]}|E;"_ogD3dlE/[|7flz/!_N (;{k~!k!;f {7_~llA 2{.1da}|u0&qnй/)~a:%eh_RF -&8 ȇF@J@#&@#i$zu FoƓF́DG~@bJ#z 1Q4b$:1FCHHSH}Iat 2 Ɩbc@Ky4<eM#VQ`@S ʣG)hT1,PѨpa Mz 6La6ܗڰ!~"ec@v FBd~H42uMܬ@@b:ܗڰ!~iXoAZLq,H44ޏ_mͺˣ!GYm/L N q u˰bc@&Z?ǧ<FnehXk 0Uˎs_RŵF͝;mXͺ778 =#oReSwËOy-C2 YqܚuH7qy  -bc J+? HG&h9ִSk/Y9!Eg7gbN(".CWqZ HŦJn"6hl2 a Mƹ/HH}:Q}Gt@bljZ^hH ִ-_f]u_MKҝf.䆃Ե @GY4*y7ryT'҈DwԹܢ*Rc{祤?)*+ Ez'b2 NKH˙8XϘ1󈠨ntb&{zu@#`]@""YF~%G:z'C4uIʚۺ@ѩGdHNN*i;Eukx-a1K4ST:zF%͈FZ)zFv,H 9y=xb?Fa-5 1p@F -bc %Zv8˯@7o $ cʪTA-ޖeO0%K%?+;oޕ|"ZHP :b $}#Y,uv88]C\}֬]O0V񍛸l]||{[vAsgja9{IES GWp̬]X:{zƜ|gzT7rrY98{E%/[*w Ry6xy4uH8qb HW! ͚5@sJVQE_wYc'^ɕus L& EU-kg@m;I 9$'ԯĩPCs9%uYWMiUpL٩۹{{)4. 
4uHH81FH^~GɛFP@bΛHq1F ͘13<6ne ѭq 3G W͚5 =け):o*#R#(*B@D@2Eܷ|X;'5&.{ukR'l&HZH p`DI -j'@zI'%-;goЈh!~R,:zsAcH2eu/gEU-6+kl_pm^X>8“Y 8F܇HcRPbV9QD/&k{02Q}GUcn޲Y~LTzەۤj87m,S '(*CQU"Vw7e-̕NύGGYHF<@^&1IP- DIjU4&.\[w޼tn< Uʫ@8ijf"i&uģxdfRP<&X{[׀fج@60py?M<&c  GEi4ae3aU#-/߿c|'K]kO/tzixx-kxSq4l}_vk _Z\Ѓiˣ> FHTUƢEl!UvkElqt>f9ΝDς"LJFHt10:SXNN|vO`dۿ]~G\RcR)aqJbzUy i휽#r̄jD\@z38[Jh|}9q)Ӏd$SUB׬su?+0Z^I]D\Q^Y3RQ:F2 H57=QgjM_[HDB%.$& dzHi5 O:f$% :{ֱtG%9 mǶL{ ɸiiu nQجTv(,dsazqwyUuѕۏ_ͫCR77Fi-H=4Zvym߱ P3g\YeK;ߠm=Ll=#"ܸ)ene5Xy"+KHŻx-Y,69#+juH h+kЩ&$:mZ{X;ypںz^rg$¢{O8 /(u}Hlw\d_H汕gG@̹˷׮ 'sprY88z@26.$ k_'d?tYruBV(9x n=H\FǓqUVHѱKJj_vǔRbfiBF1#?kF\<4G@gj3cL~aE54w@#: ~Ј&.JXHMgpN~TO%J(k%>GQ:c "mXPmp:wll$dUሪA|V e.*Zbʏm}n='ecx @G2ԹKH Ə*"gc{HUlrvBzޜ9sH ! =˯5kvH)&fP'1 RBz7$^~!$KbfQBz!j7Aޣ?/h|iKEc7$a\5$PN;pRGq5J 7n dGΞ=30T[&{ / 唵ok_fge5&# Q҄]@z@<ħ}#PQO>eG8+>{ Hs!'sn;ze3^ܑ@b{H,T4gu6Z:X@AQ>!GEIi" ˖XH ^.]Hk;z2.;%%D4s,{P$KGߕ֒:WMƍGHL슧RoHA@$eo (3<}%>5s]b?͟ojnD  Lں}!1HFɘY]Sr8Fz ms hyAzwHD٥~ɵc/{f"e=w-C.Ҕ K48:TPÓ}CG$[܂/F$tx%1:ҏƱD H@ : 9߀rB ZH l4Wl1DMS燁r/TKTԷT6^l~7ԾjDyeG=ƣ݃bҊjߤChS^@ޣܕ{. ߷Yݕ4Fŗ#$(" Ws4 )STgeZJ ub;RM8u}0|8|hUBT<;EuNHʩrm۵m篜\R7! q su 9H! Zb 1h56|㞵;G6oizqLTG˗a&:D?qUs1ެYMk M7-cdDQsW]Tľ+6QB{Hkh4lR<+:=8e5J6Hhݿy`5v~8+@ />LXlMG/0FHضm¾;W*B@ۦ\a44 ؕGc:|Fe l$6QeJ $M@_O+S& b: D0uکnP3f@"z^GcoJ$L#GO$dE$d<bl\b0$6s ^A턱 C FWy&\\>!;ٸNvk:rF'^6n!"@a~;P{3" qnfɶxG8н'΁'\v?t9·*ĢV K.dUt1_X (uSR9篙;Ƅk"ґFmԵ1s$F;vpGbj#Woߢ2r]euЋѹpцTWZ-$*%".!ƾ"䩽Q±[P^Ķ;ix#mMHH*C1TїsW pUH74&Z$i ɲ $ԯoHZw͡*"Ev)H:ԐtYUP5rhOH *H*Ocl֭]*(?AY$ҦyU/!7pl K%dT|ƌ9T " ]#Ec~d\֛Ϭs9{/{ÏZgrɵ/+zu篛 h螖Uzf jHK@@BJA@ !ΏWNF/>Ci=:*<#|.7# ѽn= H_^;iyC|=MSURץx$%~ŵ lQ4z|S[W$-$e(RBڻ&) ٹŸ˖\MHLzm\BTKoxKNY TotQ] I+W IH-b[lқ" G{TD5 eb#$kHXw VAH.~q~~1/Q<\;Nv`1i^` HΩF I+vn8l?7Qi4e5l MGCRd"1ʥ\" EqHp@Uqg(4$d(@b!9*↷˖ !D?w#YN$C._I'! if|hyo~I 9# єiM'UH93g2w D1Zr Ҍi @F wBHt@rpG2A@n% $O=C2AHU^7QE$p>xHB!1c= Ԁv$sDH #GR!>+P&Ae5t U@3f4:SL=8kKi6Mƣ@;aHR`1ʥ#ZJr 'Ǿ 5H+.CnA[I嬁T3@8tqݾr*7mEZJ!+UGp -⥏m<$ºۺF̀$)v#귑}mtE^U戨./{=&542LXzxTS[6տntQQA7Vw{nQ =S|@KnApa ;y" E Lz?|7I! 
]Ǿ?H* I^jv޼eG1{Ƀx ~@[(| '͢K^ g̀' &1q/6pl^ιcF NάxDe _q7Q xj1& uOGTYa MAʦ,;)U/nTeQVY[AkB"%^ؒ[jPdd--Ar2J;UVeTNb~sjQ;) O_1t6Qf8{];O]<ڸb9Ml܃hM/@bh:Gģќ11U6Qe5݁4}|Tl܍쭜|*{UYctYvX@&C*` Y`:Z:R;u]UAa#5˛˧SAw4)2` $=\1 HCh-\?Z:[g-#(ʚ=enB6wo;wWH".QD5Fw( 2MU|;۬caeh0d^A۬: ) &Z!_Mk G yLL }*i_V_4=xT! %_XTΉ7AH>a)C">qH#у_pSk'H@h6H=wK{w[goR,IHVaH,i9)Ytz111c&nhl˭OĥeUSA)z'k!:w5(*[x"e,2hEkHAi+9HS+rY鈏"F( Ji;dnﭢ'$&™HH>o=QI Y44fl! FY5;KIgeo8cFHbRYs@H+PR9zg:BZ"8ؽHH Q VYGjdϾnkw/.߸HWR4uu RiV~qNwtR(EUm'Ԃ]v/" "%$&)xɲ,QP;{1) Wg7rv\d_lytuN#K{`k׳@Zak{5Hbx3 HbR *g( _K#ӧCiJG6N>f֮|#ᚮfv".$& zM@RG7c!M-]l6U K&D1 $ػ;\jMrsfn=7w;d(-wI⁹+rH2fɲHT Q2l`爈Ko@2F1I @&[>k즗Rl$e%dyL.5kV}$UAuKv'ϳ-+kڲ t-#]? Z@ cak -HVH>ݹ)*7.n>:ib5Fg5#SW^K=F('Pbu9@r uh~#ĥe@WVNxP3tqgݯ#]v|.m*b߸)(:cP ŦHʛz%mcKn]N5j=7Sc\{Ӕ3k{7M<|ҊZxY{9[j韅'"Xy=p ;ǃ luO˪M=ٌHt)lʦˊ@&iy@ןFֳs0R I#7.24t~ٵ{&d=ʚzPUE$IplШ 3hOoOG(qy7MT1Hc-+wUAQg.i"Qr@Bw iq w7&{PWl똺z38cػ"fD<=yQ9hǑr.R@HC< \[!ǦO[Cc)ޒ@YzW4! G )gՠ$FlF dI'({y_P H˩h;Tp"2749SI ghɫh_z-OwH"|+Z}"@'xYb^[t M=F@*m|p!^:LTiJ-,5kVueWg@#rm  &Og((mXkz둱r7Mg?PTnEqŧ}=\Y9@V.#e7qYػ{PWo@՛HTcoE2^QœSZf+Rʐfcc;?$_2&54DT7w5x?RZ_)je*#3Ql.{\2f&ܪoj>ԝD3NU/Y}fQ# k_ݷ=*moTHE*u5+`-YUtI-ɬɪxCmW˦{FIx[zl:ͺ $l[7uiI+@H#=ڶw5mD\\7򨒢 d:LT-/QDd4Qڲ #[gʖGZÒWNiLTuN{`N<2$́XN<2H@0j&sҷ hzc H8v"6O<Pu12QH}ڡ.H>}*ځU:&~UVH< H,hT\;Yy,i[riVԞ]4ʜ@b]c3yeGOmKFz'N+#yh&)CXǥJ UP9vTae+()a/ r i25$: Y1IwM@blyD) I6d< + J$<+K/fT'Έ\o:K,z5&D*mS 9 e$;&pΌ| *:a)ZzRu C4 I۸4]! "u]RAn>VJJ0XOOyfR GZ!$SkaIYC-jX遹[lf5 YhD'oph qn揶,S= Ԙ$K;wwpиKbOL/ojL st\dYdb6!>EMh?@2f֮^^1YT7wxg5^Ԏ*#PsONY/ʭG$SK7^T88zԋK+ ɸE[:-Y #9sGhJ~ז]˧9sm޺Pdj6{Q){mB6plz GE#-^6e6MIea Q4h(j+\-X4f $ECB$6~{yDȳ (Qep B2#,Ss]&f6P'i[mkOMKȄ6ڔ#8$T2uvËH,P@?k֬_LxZPsyHBb3@2 ~mفdb S ;s֬OH;IEuj_sl JG!ECs*mA!d'`]|058Dt)yBxuݨpISfUVHF=Qn H3WEvTTRNRV?"R_Ah8Жzwh@k8ruOuߥƬH3>""~0%tujƌ䔵BABaICQܪy씀kI ٺIȪ Ij$d" ]oye\A? ʈH*:#\{a@l>RqGxe->a'~O_0<̙+Ed$ɦZ7Ï?.[%a*!4rV IP! " uknUD\# D*EE(i Yg Ӭ\|©@bTs 8'H N/( %Q? }vwbxD+<|2UEP~tvG%ƣ)I8/(u}仵qbQzظv1 C8%˶di@TF%͈AVQ;Mc#PkwnYQ`L6<ԅ6n95" *W SH {O-<" i>HBs7,b2%Kۺ?Zx{p*;8掁QUZB! 
Ź?y9rؘ9y=hFPLOH%졥".IH@QG/_j` ELeIlJШ'8jZ:slLE^IHH2 40^ ^:l*e)$. ^@A@r2. db՚*i [%ft ofغ#]g1bwLN""!u ӆCH\uyY T>e.;,D쐊brTIkM 8,DdAmOfg4de_0s>úwBCmHH6|gT_7VɃL/ydsISO6$ߗ,]j" #JO)ϫ!ѩ_@)>A1H2ɤ3F^*;>ИtPk㣬w]QA@tu''$!!TPVCo)fIq^IWZMW_޲y3'eӞ[HQtuV}xEUg C)!-{ouϒL*73ע~`wo$BZJjނ0Ge6]C--$I |%0")6/ҳXEezwxDaxZ47A?tDva\H zHmfi'~[oS%ߚz3_M{t8]  L-lQHP=چto)$g0<hM($tGY<0=Px6B<)05nxYB'#O\EW^䍤hmzu-]u4@H@2/`I)*SW}u]AH7*UhhFMm=55PH<媗!Ù[ۙn!O` 8IK׈ҧ zIKT"$' >SVw@nG=ytэzPq.80Wشba4݌״䱤e@R(s3Ŋ )A~o6\zd7E|I;:?sg/ .V[uxS)'{NJBjUku^'/BR g^?n&i{%ԚLJKT\BzU65v*XvY|.8 V(Sq@yG+ l]ɢ(qdB jr*{'%(?x!=Uhj [RxKT9G_!uImfh%t݅ѦG6)Ί&NN}h]רUUG#lKT kZnlWJ[W=XtU[=PLu[H|mc XOU-Մڈ`$eמ()lUƣQqj\}BJj{_޵ϚR;Jo8yUv0sh3/Q?%ܽ /274*G[4 Bvfk1xFG1"3ԊSI!V ɤʁ†ң. ^MW~)$%f|=(*M RBħI"!AX26*퓲ѳ #u($^N!)X7݅4)ӣ]{>GKVԧvG%!1"2\P1fl8NɟQ!i,rW  6/$dv?#&n>"/A(`b yniG- /1if6z[v;Xw5,m,<g LZƤ!2u#{| `Enߡc`kWid8R5uͭ lFA]Z~]}k BxF1fos]u7o5biY ٹ狤jDzixk:G?1M]/"}[xv(&jM=m{Zo!J|BKN!nla{ smڭk|2җߝoRjz \C!)XzPb3_ I3)G<֭- GTē37ՄLQ#/$)L띻~2cau6m I!QE!] KXԢw x7ߟ O,RMLCc}ff@fzm;R@H2ĉGH|Pt_XX T>z?4 ;lg:6d颫[.^ªΫp l>{NdrNiw`\NY;sɎj^[m픣g`beg]"=mZ&!.Twhb}K=6Dlݾc.zW?_)}"_Gˉy ^Krh!#9;IDpwh:/$Y!F 遬}1y\G]⥗~wgnt(!IVz; ڻ%^)X υƦ#&9W_z{8z<)nI٨BR}A}ͺΛK]ZHHKͭJή`(1j鿏n6$ADj;j"Y)y5kk;G19udKHnQ1}whUBV|`E-3k T(Y# vBJ-kj-Y*&B:`UDHؼUX/ռ5/: Cږ" O 0⁓DV};:xLTxINbFģ(xұNҝJ-O\ xt;ʌxN_֧#:*u `:<Cn=|W_K&mPH)NC %n!-_)#u@O`Q^Ys]y>%$f7:pH  !Ԅg^a ΌW_ë®SY(f1X| ìj\}É wpD)RR:F0bZT02C-$%.Vs {@ !%0B*w B*dmVDHEyK, Y ޑƎ)".@E<RpT2D<"m6(4:u oh]" X{]ȾUx #^]xEpٔO@"#Vo.!C!J !$xeP ($c3yB惍 0*C! GCE<9Ԗؗ_~yފm&B:V}?xڮan !a7Yۼj7Z^#w,]NP,F}}JHri5ToњuPߗ$Y'omT>j֬蓵FѩTQ7]r;tUo(dGI$κ!Df;v`q$OHa/} Bbߤjl4!Ql\leZIsSGӣ[]HdD^AO72o2RۉatE0w]f#N\"BRD0%zL&<1KTBS_UNMk! uI{AC5ցe1?6!c=Tc1"2tU^kdD<勂OET)DGɫ+#!^R^J[n*SYޓW3INAy_fV]힪<6bbr+6۠x6s/QUƣ'sM_!)s=UxO1j06.'qӾH)p>&D# +,ԄG_"4zVߣ Im$Ƥogd-a2%XW5Z9ӣ^) xJ\[=ŲatB>b/$l#^H JT;RLKޅOvwH2>!ŵ=*]&lpxWu)^sʕt?Jx’ujN'aLu<Ⅴ=N!AyDmYڨi>YOwPW`#$I9Yy! 
$vG[HR'aEVH.a4븅`yRB>;&uam3SH{,_UBH.S INBEW\)N| `2$$K=+&GPNy'&?:}06E˂i# e#y':o61bf/+$`uG:wb{##)yKm0x:{c#i~ܤ]RN{WRSN.0@dZKR|Rhfi6:B?GV#s2ݏJ?"Jez[H(d2[NHԢVrx#KHԗߝۤgTuՁB!Ѥlcƽ 2uLaS~R0=Ԣpz 6*4=h4".?(7Lɾ7657f^4Fwi]0bdj(Lzozo]vXRݏGNPm$bu6°#G]txb)Tu}D@6m2?&ĤIu?2ϒctQ\g#](o*kY~du1t‘$ ߸L'8q _*?F? UBQsύ_H="g]6l'+$FG!MhK|B d$>\g&<Rz~mZ^5KHCv1rDPHI&fTܳ9yfCq㞉ypl68mc*uKtU>IБF_MǽL6عOei{`ɱFfV;X紳DIHrW;*7Ʌ-dajhjix[VH564 I{mT;! ށ!:NZDq9@>=}` ^DRkج6-$V|*:E/wa%KO;H4L@H}"Bo.;Q2[@¾/o̍g N*bIK%Aܙ&Bb Qon!BLٽz&DHDX{,G-IhSH  ߒEmt#hGqX(R 龛QSXKTm G{.hm TESHXQSB-&B:*[DFB,Ϳ 1/c)=z::L|Rĉ/02pSUδQ**֩k[#u *ZW`Kob^w?9{1xE$JH!ɘ쨎8H ݾØ쎞pڼ*.~Zn!EqOb|A &FL!)VN<#_źᄋ),jέAZvpy/$)!I2p;E&[s* I4oB]?/1뽙aT79xuy$\)5K!$p,qs /8p &1i`QdYB$7wٵ-$al!Iu?zon!acQ ,Q%BGx$NKgl~Ľ G:dא($Ap,gC!I|#[1ɳxӧ0d=PqlGĎ{x8ބB "Y6Y<; ➎b!g±""RXHL!16"frp|WH*b^"B:nޠe8yt!1vq;< +ZB~u㐗!;0( cR 3_ezyzo",[1"1)?($lnl"42uwXH(t?$[%eqKDz$ެU"$_&*AHG&B*l݂)x!q)$*ݥ I=xsk=L!aceO|&>NakoL|mDH2aYbRW5{.Z#kd W`3έ;$Be .KHTS~/!֏l )06lTXၸ%XU'Brzs)ͤI^cZNUHWz?⍻~ȅ,^GMr_ê'~2{: v%nYpHzkW G`QDq?^pG@f [jX3[QDHݏB7y?H~L+`]`ڻW?*s޽Wߎq2;$>hb#L|($: sb#{%a'E&{U@8щDHtc ^.{(${=c $:B|a KRQH6e,jTF='PBQBv ( Kc)$'۾)!IC!y`.\ +-DHyuT;s9`Z )A[{G:+؆scDƮ[mͨ)QjVu[渞R[R{<*mj9ṽI,Qe?bJ[n'.Q,ͫ=ˬޛt bӥ%NT:+ PHM̸C=oZHoj"Sqoι!N. ĉ5CH4BBB!-z *p)2jm뀒BI%5NT'B7AK)j'BrQ9̶o_Ⅴ@HOD[i zJT-QQ¢ {6t]YtS&uI' lVy{ kzd q&*J! LbKPQ>&E`:4L-jUҞ!3#"NrtMUlYHrl\(Qm꽩J﵌뽕H'Q*k#^H|mգT&\!=Q*<ƣ@TRSV܃V'ԣ IXt`zsڃǼs#_;&j:n4U!T^uJ.vݩj%ҦVhuSJa5ͅO^X!=SB(SYx7*?=f6²G +5"UW I}*;NAy}_腯bvW28zܑQt)QvwbT"3QMmx/kD6݀GykZT t̫YR~k ($OH ? 
IW~NȰN‚T]]Wć%hdr+:5._}[-v1 kd :eW tҴYGX[B!.P^H6£s3{RZUZc3psE$SnHWH޵z;OY&M^H̽ ]}~#%8pFs5,vǦ )?v2063jԴ9fOݏv+u5,deLϯ6"l|k匓7"!:ɏ {0%aw ,>sS^H 7wl+ysnbNsXGS-}!$:V͉[&KH%fw*$QFi!Qo]p9xgG>df3BhGy!/6hl I!2c Iݑo7b4y_۾_w'v)F[o6 NƜbez 3(,ddq`y*]\&1ͷ6CpSͼj\0C9~!NlT'k]"Xp q'Xe@aC:[u8-t7[ :(& Xr8ypŮ}-or_K~u͞z&?7.+1L~8/ڍ&mQYX[^FfZP?qx.zxwrgi϶gܯci7 ۓ߲zN+CǶg69oO=lGH+TcߍM: $vGr:{-KviIV1ց0OJ.WC DHiyU)92ȰB!5)RG * >i ڹ dɎ,*/ #XaNJH]~imǤM^GudRۖ6I $}q7􍨶od aQ% _|yM}-hlKB!m1lj),pcRZN̚=MX 3[YKvԖKq/62ۭ~#+ &*8& 9efKBXkLBb<:Y-Fy6ˠ z6RT7ERMn'fSwTdw ,18kH.}f%$҈EFdKH"Djg[vB+?$b@N)SM̮L-#W{/e$9B;ob d$(2Im{3 ҄펤ɛE SHa{XBG‹L!qB|RsJ^kJXePzn*w ='"$FS}zl6a[ZZ#$G[q2tݦb_ʅT"럔+߿џkS;?=zL!MeWCf,\YQQ244?N14G5H.~8ɧy0MIT?=r5?sͷf:;;oܸO?'\M!'k2 3A0R~C9f~׿s5.;@_ P7777ң?']]] L~^n @A###CCC?!@ )|Mw1L?x&_Hd~oÇD޽{<`~WC&'I @oD??F<>ɚ|!!T0AM,]DŽ'*$~?x!?x!? ? ?^H?^H?~B?~LlIENDB`glance-12.0.0/doc/source/images/architecture.png0000664000567000056710000015052112701407047022674 0ustar jenkinsjenkins00000000000000PNG  IHDR  9IDATx \M5J.Grm4.Kn#LBd"`q"q5r !rJ*%EW"%."Lsf9sewyZ{gZ{}>OD"H:$D"6H$@"H$bD" $D"6H$@"H$bD" $D"6H$@"H$bD" $D"6H$ ߗ/_._E|ɾ}޽{7qO>zLϟ*-kHgzZ/o?Gl_|ƍ$5)55uҤI={,--UZ2wܡsH"d /O>ussëٰrJ#:ݚPhhAׂD"]x\bpܹ{щ֐)-{#HJ6Nbh ^r>Kr|Bw<$N=}D%Nbh Zm:vhllZ<<2{{{yU-[r $tf͚~:uXYY bu6"hٲeUgC\\ŋr<ǏE $Vo߾7;WX5! 
".I~-[VXWԿ\27&wر<'$$.gƷ|s[[۞={͛IG?\/^iI޽ qСp&Lb3AIw/H\~gGGG\*nS֭O>-t+ڵ+~zժUX?w\r5+עE $uNNNʷ@n` `"fJ-*5  '}yWRSS5R̆Yfk׎U<==6l\H^ kAPc Jx$׭[< Plg:))I  :kgo 6tFaaafffZ.CCCz_ϼU^4nbrTb6o.i+jIr n˗7n,ڵ+$$D$l6l?0m׮][lYXX(œ9s@RbC%ͻm8up?;yo/spӞ#&~?&L76l tn߬+dô-4ijZމ ֩gϞC QPAȆk׮}Wݻw={oڴ{0`@޽{pqqر#y [ Thذ%KdVtmbC%ʗz_SK2H0~Xt<EFaٲ0 Hdgxc-@:F!`СCZYY#Glڴw}SN}վ} UPPжmܹZI||2l' _ơnZs:p톍6kW6%P-O1 سq k  J܎C0AZ^ 1 :Ͽ=z4L6k111ٲeĶX3|͜9slX`1 _?=%7 0(ޫolE5!0} |}}cbb roq ` lqaٳy桂]-3g;A}l[nayyy֏7$P>0dȐreff.Zq999:cwIS!ݬ xɪbʃz붋 Uo}5 !SKIoaJ^1oFKـgӳ揪~nv޼no޼*۷oaw%ݎ$ VVVxK` իZ QQQ8[s}>|XI61F^ao7T|p5xӟ>G,h/ rq6Й#U旘670w\fP5j4|T +_v롍lk;hc%=tPJԍ'Oqԩ3!4=^v(fCQQnN>~4??;lM?^ݰMU_w}l~vD0ȨPNVٰqa¦=Ga "#6%Tioo[֤v)cPsZ͆G`` # x٢1K$%Ej;r*XXXxyy+y--v`];H^7YjV")/P?9::FDDƪZy\t3ǸACb_~@RRVo?ŋ^ 6d f6\|YYYx 6ߠu UoPM10MQcߔgŔg{zGu4J՜%߿Ol Ut[i"ž)Ɇu;}F=mP67}W[þ@l PgA/^Jt ΢p1k#@l]N5u$ܭW *ٰp ѝly̔wyW/\=o.};|i wDY7P_a+`Dl 6h`]ML[=6Kz *X|Xzic׮}'^fe"BO?ϾZb~=Vb[ZV.^q9K6 $64$7 ѩy:Kӫ;h3p&斮Sb J܈|pK-M)"! :އ;`ǰpD7@5.cbFoy󦤤ٳgYYYwMKKqFBBիWqΝ;wِSN8q˖2888444""ŋ099y˗/E.bV!&-.*ht:[l$nx~M>3fC廮G [\'`l}eYhG.[iyئt1Avchd>-cfD6N]`OF znڸ;)iTǏYTlؼyM6n܈oSRR6 O~p6~XǺ)y%ʆg4P,pߩKOi60'clacZ7+ ߡhuԨӧwܹyfͲ+44ٳg:or-3a+z|e _EOOpkmJj` (Ӧk6&G!R. U1$&&❽a kY%6t zE{ݞO)Ɠ]GRσdza\5Ξ͒cRMIbcc\ ź4ݻwÇ;Èï k⢵NΝiZ}@0~0pQɓ+zM~A bT3gzyy8uԷoNOOAiiih: pf`K.1"**Bu\ yyyXoݺYfM o 6 29;;3fѢE{=X|Xj댌{eee1;wꂂ'gϞai^R\W^|Q'v9~~~ǏA Z̆o 6t Gvss2eʬY~a->!!\dڦ2qp bCa…6m:x w```HHٳgϝ;w…hIII7nHKK].X6Rzz: 7'44ԩSGulolo0nY/k3+Ԧl޿ln߾  aժUkld\qzb[l֩#!,,,æW:øA#X퍻_>;u[u20>}\'~c&ͪx!]qaV; lxSSS oXfͨQ ԩSqz7</Mwicc3fy"` /<304d#_ߺ%Y+}.߰Oq 6L@l<<Gl 6԰߀VVzͭZ74j+WagQ1oqF_vXEn,&~U =Aϟ? ilXjݻO87---%%Ey6xzz.YdÆ Ol 6T 6 t,*2&&MSL:uc өLȸ\/>Aw]bphmJ=A/_q>~8nM6RT;v;vL56dAٰ~x-`qF,rbn!޳ ظ0>Zy&V V|4vب())Sd>vQqFE|Ć**55ڵk0܉㱱0gΜ<y#G4/ٳgq!up$cq !">CI.Ɔj*ã@[k6 F֟o5&oX6/7ֳL`@jr1!__؀ EN8>}`@bn!$:< ?30DlldUeP5۷| <l}z7p\,j`rq%!!ŋ0B_}a3a[> ߀`M6l|Ax0FH_HstEnnn+WSg٫@7hG+@l.67[n eBddd<|6M6s6$^.-`%E`K IIIW^@)Oؠ (u@ Nm8jklkϩzx䭚 @lP  >>>$޸ T p` g䇉=f~pipN>}5AV h"6y3SC6v`P&\4sjն^. 
VZ~-[vqg ǰϬʼn x1sfP+t2'0uwaBD8L͉ bߙ3g*ߐU.@l-lIof֒h՝w>_xRPKEn]{S`seiVaΒy3V(aŒ!V޺%l񿸸Ȇ Fl 66SBM!/Zny`A^Z.{X0^23v+ WoYs0&xN| oobٰo߾`p޽@l 6h7eQAAZ6%wneay&88ESٰwޠʲkP'!!7 :yUaeɆ}WPyV679AlسgO``U`*$&&RbSU6|i+(Ӡ` ;'6TJcƌ 'NT ,AI6? Zb[bEl {i V&XЍ "6M6͞=]1`U`ƪ|4ؠ~CGʗzh)&7h|\ vҚ'zA\'w\#ylP[y}CǷlѰ<}Ć(???88xŊ? G W CM6ի /^vmll,@ "b3 ]{ò^M&\8O6`ŒF_ɔye"OMzxx>|3v!i`͚5%dl@8lذa~~ 9U0ؠl0XZ)O.q8BE< jIuɮQܿطٳg݉{…ӧO ۿ?<S2PltRdd$C@@\>>>Ѩ(lKl 6R<ڣKALL >,>NZ#Y3gڵkl޷˕.>Q`r?ܹs޽zl5 UbZkبDZfҌĸAN Z$aaa<s Xg-l7>A:p,lBHHgɓpΞ=ˮ`' Tg UbC?vꎚmln bC-ԤI~駣GŽvۇw`|Xa~}n-l?,%Cel<¶j`ҥKklP|SR ? ۑ ъž۷o!كчѳgϊX*..}!ֿ~իW8,Û7oaiO4`qQA+b<Nw,*Ml 6h)xے%KvueS^PPLJ>XPUaa!_).\6oIs>H ˎؠ$.SH<|>ē'Or2ׁy%\QϿ{.<o6oՆ 6dff$; Wv +W]fNE 㑑lReِzU@l 6ԖmoS%Ui3fI]6*뛷0 'aU`j&$$DEEQbAͿ^b_,EExq)|%Q~}y˲}+O^|T Py1+Ee%9+XyRTJJJvޛYX颉$2^!F6ĨƆׯ!I@l6 #*\u* *P=lX9ECw/;Uy0RRRpiΞ=K :8NI ] Ȧ,0HPAewA yТYx\p>񞙙piiF oU6(hD*L#R yk5AsG-&L?~|px؀ #""aҤIb bb$Ј NKlg`ySl̪o>~<..ŋxh^.64 6ԭlVW6o+srr߽{WHbZ;xb }ᗱ;+߈4r5^]gmM5 76~T m] 1q)Od?l0ĩ~\C(sBE%A+n۷oPcbb jTڣW9 @ s7"a/sflHܧF$W~Wp2f>g wz OM{B6̝;9ǣF"6tvqαI +L} lP$ņa&L}ÜݿN4UF$Ne?30W9Ǥ66jԈÇbAPif]W!(vC:JJ}ڑذ}/\lSfFl831[Af;y6}  lx# ߂ rbϿJAwT`E lغxSֽW_F,Z奍 z@l fCEoDsb(GׯW ^U7=eӓd>yφH 1K SN0аQcuyd́SML^|ìY.B{R|S"A44-Rt|ZJF~eERI'oj*-~ڲMLZXCqq1pҤIXزe DTT7(AGj*OeʧB Ԕ߰iݥK8S`xxx 7 j(OU *//5еGcWkpKKKe>>f%b7dATQc& ut0p7߳4ckM޾;t˘ 1I[6z7kޢe6N#]Qi>]i#]Ǹ} xټk'#!#?쀿'\TXb{ԧ͒BYy6xyy /nFf͚ ͆[)0Ƣ}&&lfWi@l <<+,`\s%X}?8Pd?H4V߿~8q"pgQQQeGTgۨQx3> wj9]^Ns1 D^-KP(>g N-H( !Rr7c&LWl[b 7;yT[I6lszq*2)puhSR\I&u˗e+ >kaԙݬ{zpK>iԁ8`<6>gu Wݼlm 0Қ3#;zK!fU4j[ C!~!TC|A4߰rJggע͆j[R").|DeJXfL2?ՠa|KLL~suٱa`T^_`:u^B-.`ܹs լ\-ڵa9x d 0 հUVIGEl^6TkN݅ʹ (奦)V2oզlf;g033ShBfjjZ Z^z߹sGYfk׎ylɆ 8 KKK0tuuׯP;mJ~+ϲep̷$<*-eyr̆Iţ  yTs|,Xed93P ppJǦ{IIIݻ7bT)b,;rHT¥6kPرcaÆ&M` a߱ߟKqv¿ )ێPcǎ[p _8*+vT䈖 gy$0xʸ +m_uuزlwA>gI*RlP2CXweaQY{fMMc{enĢU^oׯ-&7jM)88x+Xl˅eE5___ō{`ڵ_';>*gFڳg_/dQGkGe'''-~߆J?*(;;[ؠxAFo\݀:6vꖐ8pqlދ TH{)PS ra4lclSMLj"Ab~Yְ)A -faY13iӦ 0 ڵ/A~~~ع4˂&l͉'a?*.~TP|yrDeA 3[ Yu9K!  
-m,;YO/̬&`O14uTT)C&M.]*o k\\\Y?| ܄mJ`yC At A<9e}ls ,ɩ~ޯ #G]K/`M7`w4d0g i a@og ݅5^>sT.T'd #\;Zu(_v7@f\n9/` dD*lbCmoÆ -<|ooϟ={snggܸqcxX$ccca豓y5lؐ Z`ӅQQ1CxTZߠxQA>g!+q/1a%6j~랃Ύp r_a؈e.ݴe?;Xj3܅WZ\CY io羮1.YOC&&ؾCg,{&/(:">#<.M!))INVؠMlڵkB6XyQa\B-[a<F+-~΄{:uʛ6fB6m+<*m`A!{>~k6eb̹C931H0]`< J3n%+6J7" 8~FOݼ_](e ssba| GvE"VNCw vOl-lPFx=qI|٬Y3, 54 };wN"~M!qTZdž=$䈖 . +t]80GϚg;xW(kD*[WƆ3Qe.'eJ9<<6yf lPo|C3SH,ٝ7B>@F45M_x=AX[ w PQajZĆZ1^VV 3qj`#"3.M.Xte>1nSQ;ңlMSXfs\R= :PJjeP "}e6fbA x8}Z7 &&&?>NIe6Q}61T(Vy;F6(26HG Ͼ#|$0|{v$!nS)xdխًWwTHAyUS~+RY]H+A Kg`8pڪD} ͨa܌;  mhlbt9Ae6 MLb7OӽW_VX謘٠|3azm8g;_vNS0\#ˆQZX^s9.Hqmb٪> X_0AXw,8s^n ("!A垞:vvM[lTYPl8j#QkPal1-R퇽aٚ-H W~gGgWA__7cCTqhTfVQET XfPV%$:yu`nu2a 2,0*AA!/Ꙙ[N._Zu 46H[7T5-RBeV2փbSQI Qcc;+lDTU]lPW\$gK#ěcEt-^,^6hKN AYTRA]"%e<۴+ J ѭ`Eeʵ'aNN]H $/Av -/IlCuܺZ[ypϱ$$)UxQƥgʳAϱ96R6ܾ}{?q!DlP#8IlT<"n+.|Lʲ*deXT 77ZJ{633 l mݺ#$k94 J9޺PZ x\P.%)JjoP0Ǣjl0ؠ^6h6{-X<4`uttdD{̕%yAqLx yH;kبq V솚lm*a`K.[UyCؠqJX%&-N^`C ߨ8"Q\y^uAef7JČ1WgV;}cD%. qoDX,s).;Tw! /AB6HO-[C #\22@FFc'`+(z!1vwFp0wʆ ڽ?dȱML>X={n{ zfя+7-i!O|=XT``6(o8TCl`T0npyh=}}]7+y6pP˫ٰMVgbnaT`({B²# !6T9E~5؀[F19vtGA!GOGA eذzoDlcގe-H۔d6"%yp&B* x!;ybNVlcw;~vCS{q4 /y571q?ߢ+ox6j,ϖJA-~ b Cۑ4 :u՗-[v»P{;=_qͱgGKPd/akXo7TS 2ـoIw oeJWgמQ%2lP8;I_[ؠi62!ѩy( ʦ=GQйwY܈PRRNt?5p \?.zƕ2EI;sKqq NO,IX0YQfOlMAظ^><%Y·?,l) &(`ߢ~v|A[D41ie<,Շ1tu냾8x CjWjGgŕIQpO9={fӫUx Jl(UmZI3aZjKNTA4P=lsA6(y &%U&<[]xͺ%$^(O>Ss$ֶ*|3mn&*_nw" Y)拝 UH#9_:ET^D&6%kOP SsT6FRo.p)ISSGqoIbfskkt s*Jl 6 tDΆv(9 練RRTBūSFWn^G>gi*T UE d^, ՊeG_F0^}3,'+83zTV 7H-DYa Z 3߯$sʋ 5"16hvc&,AȆ!P}UI|b)Lb"5`aQ+ୂ93-$ R`XU}y~+  :__q¡S9Yzy _M[0o=jj|C>アR8v:%uiӹK6r*$-+4c'*'U6(اfIlPYZ"CyK r@<|ht񡡡nnn"$͆[US^lk=܅9V4lx8r_L> Ga冝 -Ypֽynd^ݾP}'Ό GCtڣW߯7 @xB8Nd`ag~A#ga _vǑc&ms`:[`'6]zTG! ߑN#{VxKSi}bWQ -;YpuL׳EMShpy~L޻Fc} >IS־҃=9H /boQe<\MH#FO<̅QjoXi$wo,'qY^ivd`@x8ʀЏk6WئـM*8B)cF*\H;\s&!PZZkmm-abbbDOD95'A<?>gq)_Q(. 
#f\JέyW޳C3g,r1Ox10moD =22o())UekL7a>">mc84; u,3oK#=6?W6GMLbvro\9{6|4vqbMƆ盘yFqvi 936H QM,ϳ08pe);)+)e_Hb¹kwl19FOL| *V@> `2|4kD6FgC9*ņ\ IaWwK9z17oal'rSiMYйkW>;ivA7[fG- lQB8,!O]}acX>}yYz6:|Kԅu\H,`=mey#6oDrEl|`q!f``)ߑfGGVUm?7+wP} u‹xe@b5`pXx˖-x|guRm"ڮ 8s#D#]IF]M-|⽢9G%d:QĨG" {ڝBlyŃ,bʃ2'/肼;H$٦$/w&VzZƓ}+lbhd$JlPg/z2mn&*6pEDD8;;͝;W<}2 ZJGؠi*8g`R=T% 쓹g@a#(TvvGAAߡ*%Z6T*y"ɦ|wT8%`MCl9 *,t45$y=öѾC禦ͅ"gCQ.( eTZ^!@hVb?ll67㥙YKˆ>";TSxt*r^Vo 6P|o6%4/EfQ a֪Ax#}=~zĝCv |2")>O0ԈtUJ7yo *^Sfͯol`֦Aʼn jsv9NOQ}cO?c`h4jsq9-z>[^ U t7(u]` ML[N9KV?~=eJ^ |f|n>1A]}Fz 8׾NBTbNRacqEc&!bn=lHJJ~2]Gl16(MJZjo7pدAÜ31I]"Q js~AP߸ֻ}~= 4}T(+i:OO.] #~ ;v JUlKoۮc"IX$sI_lPaels9X#R͂'6kڝ* T)+u lzsfff %iK6m暭X˖ #3@$vȰZ. - 6/i6 lЮ75QɆc&LmT cNyO] 8 鲷aVjc$ xcߟҟ3mnrqAvP8 ح׃{RKMI w<YlcX/<ˆFF(`Ǿ R!69>u۴ f.xx9 6 Uy l9::Jmgbݺu5զē4Y:l>x^.܄ ";07%<-[~jݶ+?D߀[|_ ^'3 h{[܅$" ;n0DػfL>}T@r'FID<7786XV|?  "yxxhq'nh}z-!mt,M" G">瀳q~Zg FNRr<0 ,ASSgggW'd&i߶h_@akIG bQFF^\\UT |8}SH[S]p_nаQ_]M/^6/]Ͽx=?l۔eS=W>g`=6H7X[[s /Ip+^3-_aӾ27 "o)&CI|EKc8-Ұ?hъ-|$È?ZvfCTS%Z)L:U:ACl/s^*HၫĤ)a'\Ͽ9@A X>Np|SJ-}۴H7"q*Tbߨ фS IJ VkWi|+C#^>ח_u047ž).$U8:ERS!:EŇVTK V@Z p0⛏ؠrѮط;?H*p9Bʓ6G&V36?P08] 6$ 1((Pu6wӏyVֽ$)uDhtQY]\^5oa 6dee6:utBOƢ}7B.X|>Prss`=lptǂFYXv(;t3<<<H*s%܅?J;c^r%%%ϟ?m6xzzHr0e*PPP`ll,D#=Ox$ :6glhnl]N\TF$ )O8ܼeXXصkn߾W\\fCII5pJ'VPfҲѵB///6< #bܹ=f7~w=ydm۶ -F 0\~ݻ?_Ȇ3}RծZ*:::fggs;C&&&U<=foF6oxχUMIIz*-5666..`HOO),,|ȇZ[G/;44T|2-oN!&-߼UyU[ Ѳx]̄yu qƍq!8-- l})^wtADl"6Tgӻis>㞰y=зo߾zɓ' ^0!"}އ}8|hpiS M 76% @ Ďn @ Ď6% 0xn>pW|w* lx <>Ow&X ֋/NMM1`L,6{9s*++E@iSX 7n Ď6%,rrr^xb+7$vp!\"TUU)ߌ0lRtH$"p#]ӓ_{ӧO3* .{!֯_cх|ǕKeccXhkk#p#W~~ɓ9990III +W:u}(o^!g̘A,2bG6ݷo!//1$$D+)))oƇ iu>ÕŽ;|A 3'-J98r1 Ď23077qž/73!W={O>$r?TƔ&O s܁,( -J9[ p#]‚k՜eee/_ 0>y$HEMn8p|f:))6'Nx4[YY9|ggg^355USB:([BB2ٺuSO=YYYx,X0d9[ p#cDM682TO>N7McJW࣏>U8pi =zis FFF`z6 I`?SZWt zҐ0r1|bG6˜24憅1vvYƦ@ſ ` 6 AfHYpbGH "uJĎ6% @7;B@ڔp! 
mJ0* dҦ 77r6%`@7n v)ML,2bGHn @iS :DiS ~bGHn @ҦP($7;BpצRT&*`!p ((wTnӦ&++s p_~G%M_uILaCb 7n ?,--yg<&M"J7nxxLƤf7uJj T!99T pCDeb7o $n @6w\ILT dp(>aʔ)uuuB&* FXmsNRd >bHUL` FŋI=n PОjUUUoʫI%y;wCn 딆n\i /qp[ogޑ35ɥ+KK^{Qᆇe]rÞ} R2 c47p1وH p쿼}oxz2J" .Ó 7hHk\wxpQ "7n rOpVE. dV57dKy򊸥hGYjv7:ˬȼ) vA:_{un xH7Uf YUX,hw귶ay'] '&'*~W+Zc=̳(?}m~VvNj5/~0y2.d*2~CBN_bJC ~m}vG73g RٸuRKrC`##q72mÇ'/^t7nx2;|bm Nbu"/!2@?! q{ cg;2ZmUD1cM<#n# `\盯߮t3W̼9^~pZcx/z-<ނxC&N{9t`ÅDGe`Óˆ1V𓚹p}&Sv 2>8}9cޣ#>8 HBB87 zA>xB%~D0Όpڅ~FB6`,9P*27q;>n766~G,--| 79lB/E`?._0M|bo?pB0my*,L<6{%0wbJё&.n>5iWKƔ.l!V3@1N.*6$1[9*A ZSsܺrp= Ǥ`8ZqA|z@yjplδ UUUg:k֬P8}*֖Í)`Ԙi 5q/°fK:oA sd YBXU32) iO2wðo=@CrD'dʠru߿f G}!w@- |k72q!a3n)#]PUn6mFֱڃT?ةp3Wx WWŀiHJEKAR? WNċ0T-Wp- BR ˦~CrGRjBjo~eS#䮒>3!D`rl:(^ ֨o1ցZKZLfii/ ~HCr"6x{xh/iƴ h!G 2׉-ƭ{P`Jp&:Tʦ~Crÿ?*)+WJXy0eΎ ׿XgwԔYCe~ppOyYEY=+c. t\B׍X,ss)S딊7ʅO _9 gp_n/3?fX>#S3~3{y֫#A қ@(=?<檙; '2+w 7eVL 4 IO{7w0`Ap--&@ >^C,xxѓ_.9!+vQHGg@~ܹK._ p}nd6v{kh]G~m%RWcRC{ ڢ}_ 2TOO|AxYG;؂;&^Ck#s< _GN*\OC숛!E7LuJ#ls[lAQ^׏< [O/}t#435@̭:k;U>%ܰa22@@./\_ 0xs䨂3F7Ϟ=[7ASLr% 0'CL  =jccW \tI7ILJ-,/F;ao @@0詅iӦ!Pg;*MϊX;sl dCLaߑw- h^)4Z}޼yCύ3CL81n  >+ܹsT֐&cc111Na3f}tWWױ?|ZZ }X[[#E}嗟}"R7 N:O߾}cG 1dffT0.\{4s1h!&P#G2E@@@Ԃ+x Il_>ǘX3B$266F-eȎpN04ǬXB#I^d b" @`@ .c***4UB Nނ@I [oҐkԩgϞ%CL N-0C 5Uȴ4}"5My訴7xC#>uO@뫩G3gfFӀ4W)zP~ŊdpNAO-[533{aá駟Ȅ6GG1d2z utt={5(G@@^ipjYba8c\b4$= l`%K #CLYD/~cll<C7_ɾ{_6ZYzBBB@`t n [S L_A <駟FGxSF >Em6On UuuuaӭAj#p`<=)|}}5> ;%-%.7DO_pb" @hj.((R ŧNҪ }6Ff kh b/̜9Sۓɓ'kh2O#1@B D7Zi<<OЯ8\oʕѕc?ʭoff6}@2D@=Xܠ#h3thvvv𸵝Kbb"b C̙[oi27cS }ۨ0n~R zAn$z駵Mr+0j+V9sfHHl(\QP-z#ri/jjj6ltLnؿGyֶqՏ!otƍaL={ݻwZhїuZ\ZZ*u oŋSSSk!N:dbbpt5rϫ3ᆱBCCus=<{,۽{D"!c{9st 16 {7oti=]VBLeWVV?!??_g9O%=Xt/Z"zd=>|XK rrr>gy>`EE u),")S^2SN+ ˺oꫯ~:fzӧ,,,HՏ#H$s@]Q IDATss ĀA$q Ѓ3=K]UU%UNK'C(,,ttt,//t <1p۷lu52}7t/ eLL='xBJb2Mqz ׯ|…‚Ab9}}7"" 3G|,@||9s}],066F:ShZK CCCu/Aʕ+Njoo_/tpU@:[oU* r%SC#{n7L(F|PZGP'ŋރ8Q¨ 7wڥ\KRpSDCTw1(>S24Лu p̙gGkW`Ԩ-f\KSt pKae~'O t7__Ka='\#V΂yӉ#( &$$_b4OJtwwx܌F #"" 0 ƼLrhn`=pC[[19EŠ~!&&a~YĖ-[48Ƀ[:276ރM1΢E`Y9q^$ о3_D#pIS\TeaUaɴ vw| 
l#ƒ޳g`d,YYxtR;Rd>}Z}nP\%QQt0\:r8|9skpCT!!!dLwvvכ655eVEGApU5kX~=9s&Z>SyLlݺ第,$`&Mn@`1QK.ާ7o,pF/^ [lkkkg$&&1~!Fr8TA)Yf.0= 5}t*v|\NoooZ'\l#&Gzjd@0!O?E"x61.l.ܹsi_IXXX #>8QѲGxAj V\CB,ByY:{)Sر@IFF0>iK8l F[#Yr]ca%gGk2xħN W~<CaH㒸\n\\ܐJ\7ʥN4,(apQ 7Lp!3oib02CE2F#7x@@vyzz"""u֍%ANi9s|M7 P ;um3XO> =ܗj Cg,RSSn!bXz 1Almm }F`PեFO<7Hɠ/FEQ#?3p83JPs |-[-zߵ0dĀ0V!a8sʦY >^f u<+WO xӟt-C+H$PPP`hCkV*TGZ pJ%كtΝLFקnh#("##_~s`>m]122 /C7 b-\`İh"0GApX xؠ 8C\7 > `6mڴbŊG}eFh3 ;wCB!&7L(;;K shb0AGg}'Oo8CIam>w? "!pknndI}77.ZX1SSSC[w1%1 pqAϏ`^zIkhIPNccc龐 W͛SkS pTWJRΐ-La=| 7Z[ezab% 0 GZYY!@7h4#;k,9YBu+ 7lQ/^\TC^}LLEafXϟ={6L ΐ kX p-,,[&M,E v-ܠ 05],/mbbbE pAiffkjʔ)`/lFvHn05 P0^xaôiӜCC("`z4 1:@=wwy@ΖZ*C̆ٳgb`ɓ mpqqy'l޼ydoP¸p[߿|U;by=yҥKׁH p"I0AO-Fd1s_~C[&"## \.wΝ1M M-v^e[-=jf;nQ'jGTyBJMICKv(9UrMSJ_,[͒JZ[[T|ްaÐ_ѕ*x( l?'%!bp& )^sBhMJLZwEHwZYVRԬ֜h]TtZu_Tj6EW>Mb~y }>M03ʏǭ -q`^﹜u!nڞ-+^]|%`HQ.'4QuHsNU>/눓8szm(1/Rbj bm?JV_^T^^#˃kâEE7BL[rZW{NPTTdaa1m4z_wck{vEwa-KMޅ8UC*6NuWwE! z d k4L5]~{v;rXeYZYG:}w-v8rPVaŜgKjMLLF\Rgk9;b$\Ղ>r‹^ʊ͗iCFFms{Jy%&O܀~хI]>fjjWT;%H#b҇.\Pe _/CL;VVb~h;+'p!\wʵ7|nܟ}\ձ#z;+2~~~5x ļUU$w& F_ʠ9._܅5)P[/}R`{SRXcnw] p3AO/~ajHD .s;rS4%&=Ąd@*gFAn+ .ܠuJ.~?.o~jAm~NM7,s`p`9p(ћ/D֍ś\x ,H3PЫS/Ť450ߠY1r]AE=y hp/.wرs;/ܛV|#g#{ct+g[̷p̛c1ovbz= ؽv6prG EkrbW#č{1o] =>ErGv/` ,/ɇR]5ܚRtҥ3f w 1  ᆥbl&@'{"p779tC|~~uwn]^祬mV-gRO%7Yު?zѕ;d"+"\S@ixhf⍾>E["DJ$6zSf`F{=-1] :< h1}~E=.N뜎Km3Z!)M'&RbS3bn +3 t@iT!&?!+0 ZN^@̣%ۃ} W9Ym>p/ݚEޅ=,j}bKo9}闤cuoCYHyxYAQ քTk*$? H&.R/7Wu3مKI5_qawv\x?bY1V튮TYs(o+#;EKOƕ 8֙ŗOݶTkBLe<Ugl-f귔z`[PHz(= 6Ą7{3g2+2ϟό+Գ˳PBPO^7S-Tkn:Vy &]Z 9↟Gc-nm `5g]MpKfKJ3Kuk. (鏖*$F{x~VVKNLiR!fUrJLjjZ_ɪfՇp؍9՗n~ uO.dX7*yp{T BLyLI_VVF[H~K~kZVz;6b4s}Z'WN(;)uMŗ{1o]/zK)SɦtjbDPjF=kXX 跼&Z[J=h7D 1###,cƐ!&*rzsA_Q_ +DHU=m2T3zV )-Z3PUgZuVX`!s)pBo@|x>KBvu&ݡŽђRmy W3jahfGcK 8矹KN, zwB/?c'8T D{K;ryW!fN]^}?wOLlgޭI)!n*jg5:eU_I% Yo?m=yC&^%˓d??3?UOkPȋ,C;eAk5.JJCī ? 
iZp>k=P1XV+Q&OO*Dߢ5ھ4Y풎ngnd#Ka{1omEpՌ:v?+T-PJuՓ@IRϸfsY11,Xa@o.g/%@x1S!zʠY=ZgE73AɨgeNė W8sXi^='۲;rz-J7FƋ:K:{R{T)GMˁMEPw`R3iwd=Kň6vۣ*~ )_ڣh=n|ھ 4 _/uG\Jk^NY֓"SћRїZٗY#_~IYIT֝ nq[Nܮ[eV!Pb%XOnXd*{k`խJ8MAMIrZRJ̻T*H"N}R4hzU_*j#Zv8LJ=囂J ]ʮg 5M( qmiB̞;}U o!&zB3BjHiWMpO=ǯ!D|BjɁ+]RdvNk=t8~|`n@9 hxwLLLkee2Wo] wJU ?Jjջ NMy-,甘=1XI/Č-\O܉1us/duIo%ڝ| 0fg+/$U[ߩhpjrmqgy)LEy7L,.R z-M^<~N˱ۍoԁ V8~X^/;XZzfVv_ngPQW4%&oqEH|䮅ֶ:bv_S['5Y7иy4l>xܠl)33_4i>+sJ½%n@=.&(Գ)5գݛCg7i!fLIߵbg=1mEYz3Z0ְ=% {CE}aĔ9rG o[t)8<Bgu kb$}MR< WQ=175 &,SѠ>?ܙ7РLJh)[F "#p`tN= .F*ܰ=XK1)Tӓ$ܠ-n0 R"$-70 1n0.7~~Xq }QjrC/XV8a`WX" ?_ ~ Yƕ WI=.n:%nW|2A~ * cnGx8L}n&w+ܰK'\zFmn?n0(ڪ0jrxQnPOq kKϪ POi7hU8`4ju;_dIh}V åT ߉7 n=7|3~.7О&Ms:'.ܰ$ 3D?/X C1sh"烸aODDubzSO5Aۄp(٥셗r;0騸_p KǝbQakp E0߹~iN +]Nͨ5pjFb=v!1 3Pdymq烘}3۶K jBh^E{#yZXFh ӫH5Sbf=ȶd-vdg_!6stU=C$p.@(550)1h? K1|ߺp~3 XnGvu^.bn(> ;LS[ "ʧb?ؑxZޯxosV?F/Du-?wq7s@=qe(dsxn@]ڡ+scأpkG`:T,tu5jWB=)+?RKJInCrSМ3R= ӦPO.ZObN5f7n=ܶ_nT$8? FPv&QfEtPNov×ɦ<~]-4@~%g]n}!a?{pALOn:ˉy&f۵bcwT"\|2MJL*MTСg[>Q'j{x~n<8\߻n 4UAL~7n=OoWHRybA*11 ($q\m=tE{#dfפD+W8r*ٗ1w^n)\Zba gaݦ1e ۂJO(?H ]*95ՠTzcIi_#Rsth79~:pŤԓL/8PϳkRzBL?z^n;JgJszpO=7s &̿h'O/bz@=g9w]5Ho`d@=oWW=涟3J)s_w \T1YB G N(nϻ\cU$<0I?IwlKEDXFǔ-9>Ը7ܹ D~+t\qZKeۂt~%fMV;'@H-HE CFkFՔ7'bG")1EG 1ߞ<-1\1!Wo0/ 7KmT^IJ,m!zo8#rم'胘!~>P!ՎKO]/9hb4 1{k8.lSo9wCz%Fqɸa":'{زi."J`A3k\+/*;/US5OH@L\ 6zRMZ'lT]J5Pbս跗vxꉐV+ˠUOe1Qfo-B=9G(|\qYeGѽzc@=3"y)zgg?=Dٯ`x뒋ehM繛eZѠְv᜻]Iϲ;z"D}e9,,~Vm_NUOnUOFygzi{z)3w髼#Nygri݊4Yw#Ao`5f^L:(;stwx#G_kX۳Ɩ:d5=Ž1%} eԉl3N[ 13eebfwwu˺qgZE]nζܰ3Jv&+x7G jn iM-nz:SnߠU۩ P38V^JU {a8g`"X ;"e'kJO3P|܀~`qr1xPOrFaQqG!͍;nSyq4]9aWtZ#{qHٱsõb*9*npKn o~]-sJی;OZqΨ)?Al8'o: B3 "d,:0aOLѸn0p|]Z=7) )bpoWtu(!PHEs|um4!RaGT:hrC}[|%GܰeXnC}WP_>s;t]SO .,\-9Xˍ*hG4()Lj:)ZKm~pgt}F BjPJʦP ҕ3g(9rBqz(:=U[K o\y I=}#s-d&POEu+*i@=i,]Ln%q_\ϡeԓMފ[6œP.i*Ári>}b<Ƿ1¨ʎEIp+=Wv*~c%ËMy_ZjSw-%'bKFJv^[jqrWV}aZh:ԏ΍ITm8Wn/΅ždO`@E"ETE 8ax מ߳qw0{\c۳&TTTIaB\p\?~[9vUn=[þ{߾.J37[s=at54fr9Uu$An_/Նkr&VeӍˆ&1YNKY3 ڍ ٬wر|Lj`bʍm+G6Zn 
czDjjl`3K2Te}.W{|x:)}}@?$$И8]+}|x&6Xgb(Jm8UiKbԆpm@Msa!9?Zji.WڀшcwK"~CKǢL0)&$Mi/m@x*݂YtoSm/ lҴEMµ!q^:zȴWWä E|dȴ!x`mT_mN 3u6ijDg&P0鿰]YqFG3~|i|IѨrǀs#ڀ%pmY]6g!i~(@PP_ )j^a/<h& +,mµ!~Ux3nOwS O_0/flGqҙwΊF6 4=l1#Ӭ@mNo'Y3VjZF5ö!Smlm7Wj't &hަ3lRZjͥ*6VKFr yL}txj'jlx*l}gh @A<BjAL A3!Ԟa% H zN an7 fߡ !6#Nh;؅J4꫹Ϝ˗/`HLm۶E-[l˖-B~cHsrrXk֬Aロ(.`&_8W_~u#.0þ\1oذao<niܝ:eɒ%P\^j"-- ^/C}8Q+Z4UQ,eddZ-P_M %mhZoF*%: b' D>,,LP`F]IdsyvC.X`ƍ_|Gx޽ׯ{:7o @ /xjc\o,.ӧO/_>G46Kl滲i&~(a^{5_Wձ9۵k|/))ORCrM| ׉!&@o>4FGGq:t\Pȿ)Ƿ~T?~^󞒛6 ^{ Xvd"ep3J7 ,,ܳku~H?vbkpSnWi/ ) +by _{X'!_5;w^M =.9&NH&giш7"hrkOhìS 7xZv-%ŋ9w5]:0hN&.(FP pweCL/_78p˖-b҆Yo HaA"h4 *lʕXyq?zj*~ ׇp27" ~ |ӸLjk";vL,aaDA8۹s'_'rĮ L^ }_7C&M$W^y{<Eۜ?C'ܳ \8׋9]7؜oXbPĕ… Z~o)B[rr22,F{Qp\6V$w׷Ȗ,Y !|йjW #tq\(3#vP1xa׬Y ܽok͛8\Η60q2lrٴiן$.9#߷o׳ ΃n[lA;;3CP&CK][$γ#}(0[28j^Gg~Dz:PEyˆ1gMEFvj\zZ 7ƒⅶlu' +Wu\}iii䎄ILLǜs72,99-%%_@MA6AA A s A 9'd7A@Ax@Axa% H zN 7A6к BKJRz ddd!<<|֭4AIXXX|||auAh@T/rڵF1VK.H$')**vjvj#-#9?a6A!iAA@A6A AiAA@A6A AiAA@A6A AiAA@A:IENDB`glance-12.0.0/doc/source/images/glance_layers.png0000664000567000056710000007753612701407047023040 0ustar jenkinsjenkins00000000000000PNG  IHDR%IDATx \MY"I^McHK94i¤i4Fzq4 6IBIE;ɭ%nr9$Qs.ϳZ9A+ BU!@P(d4 B( FP(2B( BFP( BPh B!Q( BP(d4 B( FP(1UKf^rђW-DגsUY{ctecki]kYZyW ݓՊ2Wea7F?5,;-a>;w@ϟ? 
H-S窤ժFU[Kxi=/@Kyg Ҫj0a"3窤G5d׿x0nכJĘGŕix/)>Ws<Ǐ+**RR[䰮c H\Ks5%%޽{%%%uuu7[P;P?AKYJ3I]DUIm$^9a1}(~UNFb<$sˁwxSj^HJ'77cs>62ش[nba-s5&dd[3*21ZbL[f]\*sr0 ~\f8ӂ>7x,'V;+Z$ʼnR|ݼ 8 uq;~1vخ V>z1ԇ:hm j?Wco5GʤGh1=kGZ%b3`Ϛe9fԑi&c={7M#-YA=pHrB;H<`6H9cۼ%eF#;5J2!ZbL嫲Z%b~e~߰8XLv(l6`t}I~ci7Ɓ*du8D:{FFwхܜ 2Hq>~`,g-Jf]T*suDf@SMH2\Vvp4k0rNI\`;Ɓ`UՄ/)sr2]PŽB](]bhfFs)Sf·9=XYu,+]٤3n7 J)cBZT0hm'ZLd!qحZR\ Q {N@|>zpuC AJ`dߏ5G  Q!ucIu:^sF!;ܻ#%8­zXh H}xxL;z5T^o2wc:gTmEI:|_ } Ub>Wѿ 6:iJbW~ZEN!N1Qgd6ځ91`mG3#?ohG?vRu]\~{FUr _ax-TV >OAq16pT6m|C3=$cn-(i -Vlkø)&_/Gг!-QTTԶo^QQ_*s={*=3T}CfTKS{&"ߞѹ[ yO#Bn7=t}Ŝ[|dSˉC ݂ )GO7{!IL۬pq:P,nKqs?ѣ0ԭ[7kk넄'Z|0W`tLakV)By!G|ݾB@ѯ|*8zI \ve3プnsHs7V@ & [7ӳfҮax#$sƹz.12^M|;7V|:跃lG~iWaپn=z ݷE؋[U!c8[~xRfl;\u2zȑvͮ繊~\=hbWZ@% ƣ L!:9ݵGh 5h7DDD455OZٹ?3FFK* `fLmLl-v񨭡dsǥbkGo Kv} E1z0dtf8`3bV͜Q87ò7?o})U☫6vl3d3/3#JAIURT?9 e43HSVr}b Xãv[];==Ç3qF]IŮA_[ fYXظpw2-1.w|S-0EQ3 Lpδek$Ng?-OGA!dqC?\hLfŌB9ފk l- )SW=tO8@r#EE] 4M fXkZwx޻ê 6dB0R1aG- o4>|NeT!5׍>d42~}xǣ: ºh^1 LINn9@G 53 h 'ÊxQK{ؗn'l_Bh=exEzSF#&kO11#?P,9Oy|fYB&β%M~}%@#!tYt/kGoݞoE&a&Lx֒-hd4O2VhJ@XSF/=HE޼v D$74ϴ֚`Lۑ JWq:CԆ2@FCBgj3P9FFah0>7[DS3jZv>g86r43 ;Ҙ̷#VzŐkz/xMrbot%0|~( gجQQע7dh42d3X>Lj)a;bX@'ڻLFZ| EN~+08SPx7HFtU4o;\M`$0Zho`9d6 ;h42֭w `m˄5ݸ;z\ʼ7BWJQ& 2FF3~;)LQO\뎌FFEN~Km6"hf4-ήD8B}- ^{-))5d]r%))ݻO<}eKK 2-=F䷆⫽|N+]*}~ZZsY=a 7O{eswqm'ϊW64x+` #UKD^ \28Y;_SZZ .㹴-ԯ^L nS^z#4&|tFaf322ʤќWŵNY~XũmllߎpDՕ^~ڵkqqq1(I)))n***U{I2~Z z Ç>0uEp{ .xc+;;;333===% JKKU4,azŋ>_VXp CSB{q۷aucXP C֌paRTAAl+''Vjx)^>z} Cy-fQMrr2 >} z%2Z$FMM )Jg ={ѓ/a(ofTƦܹs0a#Eb^RohhGILw0I {}PûTo'L,SUN{^Br_Ayp% *Y|g[Z}4)11=iUl6;!!!++^f22-REnFI^tЙ[XͻD=~nƃ=GBT>R[䰎!VW7 eiz~r(97w CPkX},K#Giɦ}:JNH+$a5 5]`{F9mw]~::q|mK;exNh u %?={5BI s__P?k _z$l㮁=y'@Q{xkM1I`ΣvYӊ1F{Ol1΀v^g.]Lo=d4 %i~ͮb-kڼ?6n?D0/[&ĮBko0Hidc=CS=zQ{zgMod4 %ij+]P^)kڸ?j)|yc1{E?֥#B{:LM>j&(ĩ{5 e}D@N_J8 <N/>_M2hYt_>>B{32m~Qʪ:*162@F.]P^ʓ6e7O(>IZBFPGϸYkʰ\hJTPM{»CmMd42BF{Gi ,yo=8e#$`ϳ$?z`\Z/_/jZtG/:B! 
d.J5hWo!#4 G {^i-{$5A (Fw-م[Lur3;n {/ڠ=|Ghd4 ~x|=xǣPF: ր4u^3p_}8AfP`鎀lX{#(2lb8Py8,faI+?PvFBhՊ6xF` >q-i Vu(Sj1 "Q(d} CLF/Ƽy ((2WgZkM0 ߽CFPh20|0o̳m5-]>gD6r4^tY5h -_L63T{9ph=SUU?.dRv9C-BwDF#Q(dt[vvOlK9LvFFPhf~f( F$=d42 e>V Q(Ue#7"q)> gصrd4 %aK|6ӸޔbRNHHy(X% _T54|I.zSsʝ*Tve”o޽y7/Ò򚆚L29WZF:][ֵ5o0̓Ʀk`ǩ{2%%zZRgI˺/^|b(%ui=/@E7O (xQZZZQQٳϟÂZnz QңƗů en%-ϓyyy]{Pf"M퇵w),,,//ohh42%=}wAy fg=i'NIfQMBBBzzzvv'Ojkk92ս>]{Pf=-{׳-XmW`٧vtWEGG_v֭[>P#QR7Jdȉ=z6BHH|5 T<ͦ7"$%E<FIjռk'2dޓٯseiKlS6}.\d$+pr#S}dy'=kޯ]{P>!k}z(B꣞7$8Cbv~p8 P^Xbægڻ}C+?%#GΓk9h ڃ2鑬d] Z on_ޣg-GBIlib2rgInTFI22U?PɄc Z)lgdJ h>Ap9Tv`݁I[;9ntEFP]{=( e·Bi033X-u#uM'`BW*ǚaZxv7:OAFP]Fr޵%LXW'f/j*N!N1Q fY;@r)`Ɏ+,FhTȽSƻG4$q2*5po CEyUFḨkʐl4Z< AFP]$?IF2'Ñ(dtz|gj錧FFP nH挛a-pgd [~wL@ hd4 %퍮]tzGv|@hF=Lmt L[.3 ]Hyc,~O,ͦ;idg`_z'Zw=?71m70#M>{bb'@5!AM9PIkڂHPolL =JC9: F!e[,*͗( נ!#4 1ס^i-$d}S8BP ȵR1Hmo2ZͰr;TZ@jkefRiNX-Nؘz6RPRC|h2쏯xzB[QXh+fi-e{a5){O9BX6k芻ɰ1j,,E o);cdQ#;:^d4 q:T=\p@%Je&v16Zh?iG6|Q,Zћ|2w皯Xk6$Ѣ:6AF %}?7lAF#Qz2 S= pǸA*j>tq:qm +SgڴhJ5$];-xl6FF10pF>UDo|ʸ?NVm!#4ɭѕta#G3 rڻJȸT@I}Q 4f!uMZ՝(F.k$ Gxu- d42l+Mx=qFѰ1j,Xr&R[Sݡe XcBB9`7Hr*ң`N!uM[ڝΡEQ>I3"(d[֏6&`5P5-ߌǘ.I8J~?BFwizCmcF!;ŋ::J6|w (d42r d4 Fo| nx.-qEȼW^z d4 չK[TZWnۼyRe%EDDa]XXF:QͯO ?@$}}u7r_x166Bu9Ǖ0oɁ"',(pBtttFF2\5px2Γ塡W\!{(T'2:evy+-tdqBu_p=('pQ8D5y:O+¢h\GPܻxYO[8e fZsG]^7KĹ# T>~,Zi;潌DyMavB"y[qҥ7n:PNg7JZ'~y;hP_V9|gsG|;B{]Fr9pFq;G"e/u hΓH;~4 չm*]{P?iNbm $-&L5&|'Z-7̈́W9Z̶ ˬ3'INno= *6ûGF&К]GBˏ}덃Hh~%xV?8s5[ԡ 0xHp`u.(l! 
I{@<&sࡈFdXijpB.ׁ6IL-7#Qc#L:;lpX{:d0i2T{ _M4B7HCGF#BGA/4$lx?Γd4 Յڃ2Q+xj>|k~H(u;"2{o"F[c =-NfC_sl+}l5FtBj ]odST/d0Yڭ=S#0o04nQ#I|O6rawHLI|62*<(]{P&-æhݭMjc$0mܑCF`$g>U`l4wk<)!<,hJ| [,̗( ^凌\7RN/{$Jk#ʐWQׂ6Ak10 ^,׺C~khAAM` t)oOlpc@&l˜![AID} zB{d evRqh_Gs Hc'0<04 Br˹\Jaf{ԦSY<,hJue?FQ@ yt)G 55&T>!IX;*A{gxBGlYh݉$a G{dST/0JRXqҘp5'ÂFė&kO115k~xR9$h‡Bhe7ݣtYGxO?l*ح*# L2~ .dKی-3)jD c&}?l9M>H qFWZSÂFĔѰ@p h.,+|Fo^Lmp櫂}24F/0dyKn)h2[5ZfwPFEsiic{T 2SFd]9T7[DWp7o)Z^zSX[L5+S= X e7XgجQQעw``h--;Q|Aʋ]Hv'9z]K(, ͡YSÂF^` ﻵ[ihQcŪhf30_¤L zriYvh[HBIxt9{kYh5uIc&|;e^2zաr}>7dT8Y[BRpmvd4 %&8F vc^ڮd|:&&IZۣA]wHߥzD}lƓjaAFPb7S+KG[ajֽz+zR<,hJɯ{~q_m6af/_NMMϯDFPgMЬ:CCP7I__?$pLQ```XXXBBBvvvyyySS25pI_ܬu"ZSG=sLtt۷KKK(TEKQɹ NGO=r>h4;~.Љ^^^'OyfII 0B5V6Ohȼ_^x.6cGCb}ȸ=Oal4d&eʁa1+bN_=r:ǏqƓ'O(TGRϮd> w6ViR9 m'Lq":Tuė K&IŒ[&O739w1_ijyRp SsKw]敗RrϞ={ҥk׮VTT:Pҳ&*/*Z5'r756H:85hJ'؄Xo!]]nӻO߀ۇ'A`b3yP =Gü%h畐K/&I06 Ty*$GP_:# x啛aaa߰P׌Cn⃖ĢWf)Wm>V_nC5?La7\e:fh< {NinYnvKB#ʪjK$.2*U={}A](m3+lW VV348*#_agV|(5Gp8C3M Qq cFu:^&'W Q.ct#n9ڃ}i303āI%74%V PiIsb %>`pz o,H$ Uu͑@} I}x;tDFFP]F2޵edl9"Tr{wl'$lF-d4 U*kz}AF$Bur+Oy!hHy|FHŵGkh(yBFw{6M6I42BFw}(v :$G.I42BFw h?? |I42BF ]i 1Cf i0).d d`ȰVy6{?gy|,X65_kGiC@vCSC?nEN~²}A"(3WoyO #&a kݡЂ/uPTLՎ34L6abkmhh KvCH*Z]`Є8ӛNe)(z ihd4 %Cԃ%0a!w)DaF^-/ ih@2 i# [`lOnkzdb{hJp fAYhh(NOOutUUetII )Fɜ9'ui}+h( 7˂###SRR+++#aU#SSS2%sjpASF4'鹥AAA.]JNNFWTT4553޵24Z}(++p>|82%s{^\{Pf>fI,S4Sڮ<`i63@y[CIpuuԄxcccd4JT-]{P^"?e|@!CUYFe@3Ԅʘ;}di:Os+;7,%%%Zcc#2%s.x}AV,֝z0īӛb A|~+fZ/;sVF{bC`󊠸l\6~'l ͏q@:Ovoutt`ҧO(ҳ&n~ڃ24{$@`OeɨUʟV@*T8xT^2/g) U:;}Cyu# :PܼJ޵eCiWYV1]`~z4"g/&Rt/kH_'48ݽGOz6t\^KjvW펷`.2%5~y}Ay4{j>wΆˡH yO3$1y 6 d6SM mWnHI=?,(T2:?.P=v"k@f7|puRsmH_Aqq6 v*a=~&X 'wсZѣ(SU#^9ڃJ4{DLdG1ܨPy?Qy+or"BeȂרih74%޼y\HHH&Ch̩{e}iD@6(?y@163LU7yg:h?v'ouZF.]{P^ʓE$^iOʐFf!d@=OhFɺYkʰ\yNt~. 
OΓ~ي+BFdNżkJO,w 'KI@F]0:<<P_ Q2')=SK ٷ9..uR;&dθn2kmffPhJ/v ڪÌf6F0F/313m%$iG̳cdZeϳ$?z6*'6-vl;|f6^ V6xYGZ/f߰vڞeQ(c4- iEiI-?dҽ0u AJX=ڨ5 kݡQ:h_G@\7RPR.ȣi@|i{veF(d?fzЏGK!u>AcJ*ť`{DzϰY#Ѱ֣'h=W]1wqF:88hiiddd Qhq@Su TZ/pxR9$h+x we^c&;@0iYKeqGYftii)V[[;**jٲerrrh2Z| MdTA#~Nnј|kC!_h5ۼYBv; 7n>x %KZ#'q(TW9732H:}9[QںH :$v*i}kG6WPsM.dy{Ia$.2*U={}A](a𑣂RJOk VV UK6?Rt zGr+9՝Bh:2r~3; 6pi=t\FэܻkK_ D:T=\p@%X~Ì^}86|`<Ѵq|x"}?7`ųlAF#QPXM=Sh^8˖$0u&>"a |WA=HEzѕ&7O=Sϳ@BAF#Q`\䣶r ѕWʽ)!4݀a#G{S`4-0ӷմt;]729v~idDڸ"H!aFE]n (d{6 0=#MSWDf3CnAʣtYPN^E'` #_|:㌀ѳl1_yc8># 5^E Сld42g ]K!rQxAR Lf>{b#/ꉡ <&7{~423~-a.ҙq?\뎌FFGΰGR(]ӟ~e9Έ/Q{㋜]c;gF!/(o/.J%A.+x8!tN9+%UFEE4[+ZZ.Z]}yի57ּּ j.-ĕ܌W ###_^XX(snni}PznC&R9*n :]sX`tAA1YSkqmkX.<᝻g0k?K1tS3f͂ .t`tUUl1Ip<hIqY=APѣGO>LjZoz4r8o5o䗝={6222%%%//R]{ceNZbLZֽ;wܿ0][[py)|C O-=sLDDDrr20I&y2X@y -1g-9Vjj۷`Kiqft oo[Oev:icRĘׯ_///oll|83ZBo;J态2;SrCCChx(sKxñ2)ZbLZLbZLL ,srr>}Z__,{!mmdWmuM7!W)3<9",, &yFF,~f[X;P^ӳv%>5::fgg?kmȗvUNL-f[ٝK܌-\=I@@7l M,[Ywxs {BdyE6]5xhn)ɹ߰ݸqC2-1gM-eMwk@'V;+TILfDJj:qm壏5 s"*vM7b*j gڻ}C+CirejUce#Ę5D1Yͷq,̌Df^rWȦ ޣgLv` @6rOh dش,;ni'd6p*yćh1=kJkK|ʀG 3P$ެYrdm8De操( R:ɦuȦfIܻ3$^NfUgtQ57ؑwc7Ðӳ%Q~.Fx@ffptfױCU=!k`jdSh$Fœxo,iޓmFTq+wb Ȟӌ,V``֩;sˁyd ġC e7Ä#ԁ6G -g2k}7z|ъ =#x_ij^ ٣\\9̊o$>7^0t $OCo5ڭ:]uIX8+((|]9Вk:OniF'=nvWzwѓn?ſW_B`S~Zo6fvj>hNJ5}Ilϔv|@n V!f X$Chgњ?nRp|ut|CK< ȨiFwHjMb&P$g T~ CUl_?c0 J|@A*jҽ2 48L1u3#gȕTκ|@TUUotI_,)EFwf~))yI<,u8򢢢;waBHHӧOiq_\pի)))L~I~,Dthyy\Hca5+'+:A$kwsz5[œBlBɑsqzj~JOBtddieGs uTJ\wsFd;s;ƛ5i?y$$MFl~ )o*q#`i=ҡCRZ>+OI b4YsYy£gmpj>|72E;ЩpW$(MF\@~Ljɪm>,,|0AІ_vѺ)q?9}MFl.(uT1疭Z1yo7IA}mO4MF\W*{'H_?!F5ecs[?QOG&#k&Kʗ_T=l#4~rMFLFsDsMFn4+Xz[HKOcih2g#΃Xu΋Fձ:v]+.\1 M&##F?܎V6-gR4'FtAX2vʢ x}%27ci?'{h~R+hrЉ(V߳@*vf_O^fV?ahb41Z~s8;`4,dtrbɮs퉨F˗ց {GЋ]Gagh0w-H៌ѮZ'}C y dda4R {K;Xz~Cmޅ*xM5g:]y0 b4=g/wNtT7 (xbw_偾jG;sc6ilWb41LLM|9@YQ޾h6FK;4n 1MF~y wts6!-9-M|{y;_ٯŨl2_j`lZb41-#& 2ak&eApd:a&b5::6nf5 Xh£lb41LLf Nek ]i)hVxEt10ddbet~ح]|`l;1F2b41Z\"ԛZdYרqOdگٗ~g4n1D'F=U%)#GDҤ 'Jϝ;wʕj_j5y5H&Êʪ*Fmb4c,~MjvqJFԤ,Yii)"r&FfəVVVϑ5YZZRk Aj`Zib4Ӵ<̟ HIip3E:&555==ݻEEEtUU1Ln+mBn 
DYttt\\\ZZZNNNIIIEEhi۸5Ϙ"SKbT6Qh2)Z,44422ƍݻoɞx4yɮ9-r.$~cع#nsZfG0wTTA6|G_sAnv`իyh22vb`U=<\B=nUϿZހFP=Pa ewPW+|\P_|9999//h22Zqm'nAU=7wZ满ɥNK\>Z;r6e6?|׭^,S_ 55b a|lo5vDgL0zO /eZrAnF={6** %Ff1&5#0z.|mYz;wc]^ẇ~4aa"/0PhSWN Þ#8-JY/*iަ8]k[jHf4wXo#Fc`7k)߰d"}PwWzlRFz@8_e=54h{ ϗjiK\j1v5ѻwyoh hx|#[mBRYew86mCm>9mށZ/Qei,6?Uw4Fu.ZL>{)ke!ݻj|FH ;E?+^YzxWm-LqВ *t3b4Y=,:E1y U=zwdM_~4&2ۢEˮ :t:|$dZ+sl?QÁ,e+_9x.R(Զ]FVl:?q/`вJ)s. V֥o7zDX z 5@._:j=Kv FR^V6j7cVoei4޾C->]rc,D1VYK#|`߸y$op7[~b_-B7#F"р!l \ĮQ`+v;,\ۏi!s۩_.e ^?zn|XQ_NoEV~ R A7O!_1c6?d߫7?/d--c',m8 [J&`$ ]n@b\PaMl5bc z{ȕ[}TQ?#R><|7 'd'lu\ |@H Eu6{оcNtUz@\<oXYGgo<~ |͟225"UdxBGɈѢgt M;AZ@ދ]@fwZsXC |ȩ럲n94qPlܵhs?JZ'}=լW.[@;@.F|}c=^2M ̬&1݂M&#F,};/+v# ڴ Qb4dػZ:@,8x2 B,ߣ7? 7틝8]^m dkHpdh3ڞoѲKL,bRdJf-Z9zʢٛ@3W`m4~+_9]? V"4 YHmaE!11zV?"_ܽpWм`/hnYl1BU7,G- ѻ{yZ޽#F=:> %&!bE&[v9T6&b {$^ُfe44~)_X]XQ9v!dGXjEb41l2-v{ϫ}@6/tr/2 uDk[U%ڴk9=z)áYk!RF*A|9']+,S/7jEb41 =+eߵPڰvf2Azhb41U"606UZmhBueۅwNh22f럲Ykٯpr;䀆د/rȈ$b41L'D M#F5⊭##kbVsd1v53=RS n `T* |2 2Z.yPQSP{erXB^UULȚV0>Sf~e̝+J#SÓ dHFWxדon&e**(+2o,~(/|:s1),ɾOpղ>|X^^^YYYUU<.[JC}AHEEEգ橓~.)SMt>>55533F4ԡLc2:~M3FI4W-ڍ+W M(w\NCUrfe%[gm]hįZ .DGGrss(פ1BCrԂ7ndddY򛅊3ZI4Wȳg"Ld߯l:*~[yfĨqϜyG<=>gfַ];f9vS̑ȤӧO#A(}֭‡wi+$ш_3/9s&<<<>>>++ qMhr$ wqعW+^eP^&jk}Cȷ}':u իW {ř`AU;Ҭ.*-1&Yˇz^{玅RGd+Ya/hki5v⧎N-*9֎%_>jkD)c&N  ;y;q[m̑0ٿoߓ'+thįZ`2^n9̠a|9۽g/[뽐 eT;nt[u2x!tan2c-G ]^~d9'UӁ_F;s$4ǍU(O|t&^MU;ռ.,-4TG3b$ۙէK &?GN]VQ-#3q;tBZZ| -[顊j&rrtQ!|j̑x13f\8l$F:׼~[PaKYy+'uꢿ?dyye"*ДY 47TR59-Z̀ ]v#;G-cpr#7yBL N#FIh얕5{<-8Dzw Cz_Qo`b9;dh:uP T+(4| 0z88iK ֺiM^R})GHP/8lϤ@39[Xټcl?ho!hd%G/e -%g.pA,ҿp 6 z'D9]fĉ1/< ;efb?gN%V"Ssϭ}{2\(Q/aΨID/u_<~[Ltf-_PrM՜gqG=}]̌Nʓ=:lO%@39Kfҡ<#?op^{jQ./S8̅'cK+saUS;88hP@ WM#WYwb7L0+uzw8VXO|(>~6><`ye߉aH::23:2Cz[q&l21|G 멋ݕ;SlK>f+2qCx#{"kb(>~ՎJ֭[gaa1c4{eaH:+^'F?5Fo:|zcW{۾i˙ 4HBZ>cPѮ^cצBlx+=효ߏx񢃃C֭њv|̴g 85#ghb wvaMwZhf,`m;eяJF|<!)qtd'7۪9Ƴ$<~m T詋BW#AfWyZ(9P2T]?gZ}2 z3_'}ChNM8~ T 51M"FR {Kw` RRքQԂo=ÅC4ڢRl$gZ$:ye"kG\v˶#4F¸+`{᧦ij?ZZXShb4^xIЉjף& | Tً Z;ke=z" Gx+0hUh%oѲKLU #.@}-"1lٮ% ~jjǩѪ5uM&F."M!٪A"$ בf125 
ZZcCWG/%F_mthTHZFc.7+S4NH-V-kb41DY:7%BF[s|{,rөlN1lASkb@'"nq.ݝ܎51%>xZGFC W6i^j*51M"Fצ[Nx!d4@˰w0,0w-;d_^vxr=p$"MTMq=| KPAJF|G<ۖ fn^㌕{hq.S4NDifVj4uM&Fяq@q9ޞW|5|*CKkϖ3y-kWRSSE vM&Fэ"F{ѷݲ=zFma]$bѲ_hWy.rp~_ڀ M&FIgQwr񹊓!Eܧbr戸]RTsRFw]xBM@Oz9]U-L!!fo!"ftuu;St?D={6"""!!!;;Mhxʳ0$A YKCCC]vB1޽{qqq0q J>}˗rrrJKKdsaHRdZhL'vׯt'F)W\\)Ig Y2`.]E_VVn2q:111c  ]2&ZXXؕ+W:1FȈdddddh22222b41MFFFF&#####FϕX]1IENDB`glance-12.0.0/doc/source/images/image_status_transition.png0000664000567000056710000051050112701407047025147 0ustar jenkinsjenkins00000000000000PNG  IHDR@bKGD IDATxwxTu?II&+$ЋD ꮋe^ˮW-Zh"H L$d&)IN2~=O?gΙ7 H@DDDDD4TXgϐBن6 V 'Wkr2)eWC_\&br"DDDELV;NַΈҦ7z6WV %L԰s "+V1Q:GDD429\nj p"zJKja5t*⯆:flQe UY M:\J$H ;!3S"1+5cC *{"XF6`cQ5vi@ Z)q1!Ȋ#%L D2h9n7JM(3yM;͉@ cp8,ɌC.pr]. ""aΈW_*PЎFGcaF,Ea\Ld+;vhu T-p"+Vd'aev2FG| 0Tn{K2 d0>3S"K̺N1`}a>;ZFS7dž`Qg(ibG$"DDD4Ld -Ƣjh 55`FJe]tQt~&mD}V40m:_㧀>@55*")Tӳq:SOr؜.d`K>b"""YÉrKElĂ?;7f%^ҙ  8\ՌCU(7c,噞%h@S05_-ªi.t=t?ʚL,ˍs11.o!"&"oDDD4N7~<OkwBtp^N7=g7GՎLDdOⰛh,(@kq1"Iw>W+o6a͇Sjo.貱 .n +}d 6N39i"~/]h8z3ȈoHu<kQ,Ȉ?F"z,@DDDdӉjzw'} L|;K)SjEH;kj|B"^}fh]bvZih$b"""Jug(zL)q 0A1?48X-˭Τv.l(+NX"QX߉ EUm31^SlDLFĄ^`s\h8rM7:{ uqQx\%. 
շ[ͨm3㋇azrv4~~(u;`l ﮞ[&%z͖SmȊ!$@%BR" X@a^ٌ@I55YYDde!:'>Kv9Qw MNYxP+d9Y߆| \?[p[ED]&,yybE{m8І^ۊ.-&jfte++Q{õXB$=gC^ٌv3=|^yb""" [kqk[$3߷&;Urxw YvX?%RB"J,@DD,R/yyæc>~@l˃h$c"""o I (bgWH56~C,6T?{HDDD>`''" ˚UVĄ46& LV{<:^;/l>T",@DDskQk5c<[Z Y"%36?h<܂Z(R2"L,@DDSjxoiuA&R2CxcX0n7 DmXg[xkO1?*g^Xk?6) LqFn1j=`%&VKĄD4Xg?;cBuNH'J> r ^1x~&ڻmpHɈh0Os>{z/VmLJ:~(L}xZ֞yLpnWw1 4 "" [kQb3=q*蒓EJFA R[1ټȯjF~UHɈhOxgo f$GbtDpϬ{O#8#)$2B]h\ng>3%ctxwi@퉈뵚XWP1g~v'2'L:Di~9N@b"""g!Jp[Nͽ%J\) '*AQxkepd*FD424˳RʛM+7 $mh NOVbQA7:y  ""jM}+=9ePA+R2tIɐHhl,F: ""j Vp8ye&@"B@CyoӍŵ"%#WPXEcbᯔNQ]*ނSWވ6s,Lpl(1  ""ZvKxٟG`xHh8A.acgY<&O@ّF ""Z :x}>늪H$"%L*!0&.J` rK<ӓ#h$c"""t fӒ=ePq)kMO@}.Rb"""_Ռ@cu=3Å5-`pkٷ0H%W5 yV [+7t?Db_^D4Yv*mv\.n z.k ,@DDuj= P eF"wv>+DD# y} l#P_3@D# y@ԴuC!NHc4O@֯gmGņɈJIk`wQn) .ԅX}ǬPh$b"""sCy.˄NH$羟ha"""\-egBtmFRHh3b\cV܉?rR`"""#J0&JºVtYn7m1/k5#{ͬV8,~"ݫYF0 ""J F(2G\n'XF0 ""JYzLjD`1]&ŇA%̊ m:\Ȋc"X+MK\*žrϲ375FRHbkj5ie)1>F/R*"Z,@DDTr ϲ3-9]\nHilô$S!JDJFDWԤ^h88NtDJE#2 2=yM.R*",@DD䵮tt{fT[#b2LZ3jBiS%b2"Z,@DD|sclluubDR_z6U#O9i,@D# y`%%eajm$R2Ͱtq57: }"'ⱱd-܂3fTB0+cY"u{akÅ]ghLE$d4tXrRNLBg y#'cD5,v'VLL'  ""j#16ZG*㉅Y6v᳣Kb?dګ`in)TSFf{N3EJFD|FRKo;NxoJDvb8 GD#p]f';!K?@Yz=mrfCcxdXćmNt wNIEjVĄD4XX'i8Y߆w?b2 GBЩdxz{^RnM"^}&J xy mkfe`јXh$ptwn_.VO==QtYۦ 1Q:v$(p)D|p\hDͮ]|@%nl߆@%^}Ƕeis"&,@DDD~dfDභq@a5=4="bBRuڊ?]@ՅUߌfn{;ngg -|kPfS_691 /ñ訪)!]gx$^s&ƒ<}px;gȻ[R9Q=kkƠr6.0BtTUznn$Sp9޿w.5~"%$"1}˪iLNo17- ۷n)!] k[;jvd<,c2d?!׍D$FDDpa֟'oF}ՎYڀ.RnX 7N_IQZl{zm͝VLy i#C&3@DDDߡV ٍjJ> Ɂ [GEĤ] lyxG:\ox>b"""C^͏,RۂܨܼNUmvgqJ{hmnA]@Y [~| Q?&ƅO[ <2mذ f2b:I T{l YPH)h8`"""3fnǶP b97$RJjDuH Ra7 R-ǽ0otH)h`"""]38?x/>:X-V}ɁJ}"Muu(۰"ز^g~^u^kn"RJ"N %+esonǎ3HA "ݍxk<t?^y*~(KD4ȞygADD4\B}A~JLK٦pdLlqJ؀7A xYp} %x] ""Atk>ũ6rq\<!`Ӊj{4@c{[z N W0ott'1-9oUވ誰 2ۍw7A/`E[9Tل˂JHd2BgXssZO Rur2v|g|ǁ3X3+^|R ioǻJ0-).|Kƞ8`D`INJB@Xsfm< KgѸsJ*_zCa5s}>ټ^>%"R,@DDDCOu 1wT4!׌*pEuF3/MC@|<J5DFg}=:US Պ!X59f"yl>Yg&NƳs0:b"""ɁFySĢX1! 
2윬oƢj/ƁF74!:(#=uza؆ncM5\YX>>K#-.l-DAm+- q#r?"XD̀9EHk 2YȫhF*3HU3hBR20rrr2̣(n7\v;V+\V6+]u2u Ɣ0I̔HLI Gd{NŨ0IIxrLehh '"3N+Y阕ss(3Ʉ*Z:aؗJ% @^0 R´ȊcBK>-`{I[/W"@%ǝSRH ^v."+DDD4tX{{  dܚIUhllCٳ=PB&O}U-@r [fѳhbAz 1 +&&ϋ&hlLJqވ0 3bxl,f"*_숽TT-Ŷ:YlNŝSRq[N b/@c"""!NjNiA٩QPȤCtІ&-3`OYZK1'- K2p] h@mvn2WpJct`ipE}fʚ;PlBY (3tc;.7 r0+53S"1wtUDD t;8Q׆:#NQTgٖNԴutr)t*⯆R6',66YlpC%!>$Id16\ʌR>tg s  ݨlDsW7LV4vZiBÉƪr{wKGDCO)CX"p#"DDD9!%%E8DD窉g`""""""DDDDDD>|   """"",@DDDDD3Xg`""""""DDDDDD>|   """"",@DDDDD3Xg`""""""DDDDDD>|   """"",@DDDDD3Xga٘={1h@DDDt$ @A{=h&"a߾}bG AKgQՊ^x'ND@@j5"//u竼7|3t:]켦&y߾|7-F^{ˡj{WyɓVŋԩSO"""i@4o Av)ddd@Xpo>b6my`o:;;\!!!AHJJz鍊n*a…aͽ2\8?_jp)]O*.]*XgC 5kyKeeeBpp-l߾]+̜9ό ;_~ /^s~ Ν;<< }].z^;{lϿ ٳg: oڵgVWW缦F yKV~|nƁDJ"|  {GANNQ__g"11{;ٕl111G}}=z歭 ŸqPXX].r9z=ZZZ|~8?7Lh4έ'K$]UF!::gN+@}~"oDDD \ӉnV(p: /_]]]ޗ}jyr9\.l6J{>?"DDD Q>""`4!ۿzxqm5b{7>?Xpu^8)S\n&]zm۳gM矹#$$`=K, zX..Z}>]#j1"""a"G233F#[=m޼YHKKm. BKK&DEE k׮ZZZ$_^HJJX,`׿& =X1m4w^O>-[vE{1@/󖗗ZnϞ=’%K|@}~"oDDD2Sx駅ѣG JR¢E\}FB!DDD7pp566 WR)dff ~i?|% ӦMN>}E۟>@/8qBXd h4aٲeBYY@JW;/"DDD>"eeeHII;]z <<b!1x0'HPVV1̛7OHD# ӟ0ؾ};|IhZ<3bG#QXm۶A`̙w܁iӦHOO;ш";]܂ `cy""""""DDDDDD>|   """"",@DDDDD3Xg`""""""DDDDDD>̟?+''1[DNJD4X|Lbb"d2E_#ɐ84 Yz5\.E_rjժ!JDD4t$ b ""v~_!LED44xHR^J WTDDCg|P~~>&O6DR q*"DDD䣒PYY1H$±c ED4x {]'pwh *))AFFL*QQQ"""\<DDD1vXH$ϬYX~ȫ{ry;^GDDjjjAP(Ԅ`c """aqqq=a_{,?Db ""aufhbBÉqsǡC6s6VCO)C%t*2 ^GDDELV;NַΈҦ7z6g;-πre/ѨH "5,?õȊcL*}*,@DDD#Ƒ0`oǪ[Q W1*"kQCBH !j)?c9=5Q|P5PDP B13%R#1>6r)'hh1aSQ 6Uct;Ъ)IaX=R´{ytQhBQuȫhBFmNXaIftr sEuF3:Vv7: 3b1'- bB ^ٹTNG[Ԁ-jԀnYz ; +1:,Xv _ЎX]Vf'  |_bp" 41>6=G!L;"  ""aB'kfn16UCVன-'3R".6ۍsm5lK'-h촢llC?jQ!V$IE.'-`יz|rtań$<0'FG_q ȺN[(Bek'1+ WnfjEIv Rj! L\\L.. .ns&جvZ)h=&1%) 3R".6ӅuUxkO1!3:X<wLNB6lDDD$ˍwQf-EYI}Oַakq-6Ftv۠RU~:t!Pi!Spl:m4jl &v1;%`јX}-XT0-^:wMI4wb"""b.GPnxH \t&|tkV J8h u7ш.] 
謭@^['%a4d/zC;M>+EZ.-1 b"""Bۊ?LS;LD.WgށRTt_&1 A D2.\.tQYSU%]fDpQ{z"[l³C:3R"0=y; ""!Pd᫂JNoD}g|\n_ě{J~JKCpRCC8UsSVTV Ն%xpN:dB'qTxԋE"KDDD46'^u g{hBPL c?,\.F[eZO`jQVMMwȧQd/g%GD4| &0&~y6v޿[N$]6ti7jNC҂BW L^?kfeYnlNt /l>qx{ !5y ""fw'||"^s{bwF)<8:vff""s~~"lFcAZgM½G \y k>ŞR~sDv$GDhUvwwHu3^X1s%uUy6vBIqogB#0AFu~@-M_bAF4q߭=G;gO!x['ԗ FԌ/1Ԡgᆪ.-齇BKhBKQYA < KDQ:Г!&;ք299;;w!oe%X2=vl\LZAY9l<=x{qNa"""z %F| {tdGL_572 چcĤSp h=oj Ñq7r/cZl Dbޮ վ!VޙC=Լ ssrt ?NݺZ$+ IǏAVVovVU^sJ2F;nvkRؚt S+XCM Ɠy N-Z%0jǣV)r,2\XJ'4^+H.w#" IDATXٛ|Za>1=fulwﻟG9;Cpi"&C#E,xfiYTaȪC3HAH=BDbzcI=F-9\G1iݔS0Os|3ƶr/:L9 Q]DDD9Eo9`ϻ4VPOGrME`k_ }OM-(+G"T3_IHl\K1`8Z)}4ʏJ- ǰ+*^}={bk!OA^ J+%"ѱUCTa((+andPM`gd ֞^Bc1{9m~2 0|a(jRX13H_Tcg5x z,?ݻcQ%|?Rc$˽DDD?bՉ+<Z{kl[t =a.RBz[__xtꌹ/gil+_ {ñ=H HLriȤlL4l[xiltfu][Xc76scƄ_C>"&%U_B_zVƳ~@Eaݨ1;u1%=6?v$[B LS^Mz y lSd0Wn; Kk ^}eL 17b00CQmc"""gMl8uk G yR+2x}ϗ!'ݷR*OG{K,"$DDDu^^I9m=QV}7)={Afd(N@z&&p+vlM1is*T"%$DDDug!HUێ]Mŗk0ޤ ,\]Puk~+ Ө.**g{ELHD괣p:KGv3<% laXĄTS`뎢ByonioŢC#>`"":K0C[i.k=0*TpED]p+ \4scnj?ψj YN\|xUsY#)Xu":vHy0s6r߿7)r.H8y6D2|+ {5E\O>"&vƪpvxc#~%ʇt Ih̗­u pk$Hocә>V򀽉HױQVP.ZK8 y3up>qeskSL Dj<8 9_썀 &wט; Rcc8)&6(Q Xp Rc>Os_)=O,@DDT$céws}(ϧu 2 Ȍ`\FzAiuoGWR#.b"":eɑ(ؚa|gg3[o R2H5lX{ y%Xi=H._Jph՚ 1d s+֦?GB%bB"i,@DDTg? 
A&tӘ/ s;X{zDe׸!FFXr$Jc>GS*Dž"%#AQo9Er>xk+O\AvqY /"#DDDuBpL2nf`jeG/6>>"%#m`T'c5voL%dj  ?Ƣ39YWJ+Xz~![b]&30MFX~ Tjrl>鈨&DD˰bPc-&˕pT84mb쿜1ݡ!@iRdDTXH툸T5XdM,,`RkC5/{}W)FDXHm -}}pF:s+g=ELFD5VR#)ygDId,!˰7Jwǁ$v$"DDDzԍ4Uo|vE%HDJFL*{4W})yŸ#R2" ,@DD]ME}GKxYTJqV,ݸ=;\MAR]9ka[3#*b2"zV,@DDBй~=뷡XtJ8Q9J$i"&#gDDDzIV||:hsmXPFV0575|p& .c"""t9% %8jOƥÈc0vrBH\ƬRJS$R*"zV,@DD2and&.6 "`Df7ҡoٷ@OH%%d 饋hj }+]NARSG0spDAمfFrwĥdGXH/]LFs7[YdR6 `li%R*%&H$YvRJHY^.Rrlmz,R9̭,6+R*"zV,@DDwʐ[@cgkȭmQU6Yv9YVV!j""mDDDz=v0#(K+Ħh̼,T[,R*"z,@DDw HikQ9PW #sI -wneV i9jZbNL`({b YJxZ y8 -QP@~i9L &01Y@D:gHTcwr%s31"hc鹟(SJ*g diRb3`"""]\;3cYVQDHw~egnv!"-DDDz' U $2~'72$U 1""gDDwrakf1)V{UO"9%%,@D )PP1+V(!50)ÕތFʒߓH0k3#zDܽWAyosdMڠ4_ϩ2)JP^uFDA=rק\R·0n-EüE'8F{(we>w߲3g,"~V)3y.p:&>s%ʪ#yN@DDU*g_ P`Esfjy!VkU@ݵr~΀~"JQRk dR+؃/#""#JTiޫ"IA9ȭ5w/{d.sv"FAA T@Z5ED:􎡼o e2*9Pe^[%Se$; ;?*2Rp42KGzN~T=:*5)*3" w-32PzӺ־{6,\81/x]]"vվֺKU1kQg=6U b"""S 2UWM5RAᅣP(2Y;{GJA]T/pxlZ9otqkf>"=􎝙15ؚC R.Rz좪gH7ޱ37BVfge (.#騻_/w~**5GRn]eoPݽj&p4嗖#DO[ "DDw,PT#5n6f9lMQ#h"ٙk| yٱ""""sӻ?Vu iOщ.Dr뉈t zV0p+Kآ"7GT*rsFcv3N014DL*=\J֘7sEi =AFqnj/dW`"""sJPQR"R*%ePUNTJ c"""$%Y""]R +g*\ "DDDz Jwl1|Q&b2Eiih#r Z* ""K JqfVtR.Qdk'Yh\L NTDXH/gnj>(̄RtJQœ\,@o#dR>HWjXwGEiEJE 9 2=\4gofH&ڰⳐ_Z^9s1Cz6(HN1idva,!3ѵa=ѳb"""p0:Yc>;RRĈD:$5/4qӘJ!4`"e,@DDM 5{ ;6+DIa15ԘNBF.0'"]HmↃP Bkz4EN ʉCg4wuY ǯC$"]DDDzmxk/Ahܽ尥 FFM $ o}QRĐ^#DDDz5󡭼Q\6*JKCiq ј~|li*R2"),@DD[P]׭ #b2691hq[i{.%bxkIDްV>H+(#:5B~uU*ӑP)Ȼu kw_LDi/#,@DDؠV1}}T#?>^`Urn@ Fj׆Ġf"%#DDDuAwd<2 K+g6Y. uoulk{?(Ѱ8dtvpߊD{XN26ӘY(LM)i[(),Œ4濜Gw/ׯCK.EiiFDڌcLex9 QQ"#eGE_Sw4pV(& eشi_Aᣏ>ϟ\$HSXHjlٲ8#$&$`{s /1%Y"$1$%#?=Иo8uEeJLѴr入K">>'OƲeiӦ!%%S`"""RvZb޽;n޸[b}Z{9"-HiI4vzsX-X~4q0|$$$/;QFի ^'7o)SO>~:֬Ywww|л9NHX{~hdgĐ,JcgM\nj^M7obݺu8<1h \pyF'DDDzch߾=F&MŋXf *_ӥA=mYy h>H;"_u ~,B;oʹR.൶汎ehhQFҥKX~=nܸ_FddDXHEGGc;Z>R) p ]Ҙձ=z:ZBڙSkl;&W~{VwKOHHLLLлwoa} ""jٳ'ƌ \xgφw~ c|b,<˩9sD 4;\K?`kAi)43#yd Bpp0BBB`cc ""J?~>>8{,<Ǐu'_qH$W:!6-ˏ]6>}ݐt8T8邊R:7;4Đ^۾2,Ap_~8}48"t/2bccEEoXH+j]7Ɔ xbDDDW^`ހV Jw)Dl P"q> UG IG+_餱-4. 
E/ }ӧDcĈqш .$$m۶{g7|ϼ::a(  cII^ky検>l}/̍}M+ʺ#:zz… uVDFF'ND:/$z&,@DD$t;ݺu9Ξ= ֳֶH%fWࣝ4r!moj==אUuBG+mq JsVHH$ >XbvލcΜ9(,,;Nb""ZT*l24nǏǶmpqlR\XZ',=4M #a.#r l77ضq6G >>L0qqq;w.֬Y___,\?IH>jс0uT/)SjR1p: AyJ\" ʹ,7qK9qi /3~K_~% &xjEBB 0 IDAT}iӴCۡ5^Ywes;& n݋RS҃( qs^zbn' on8. q|X`]nݺW_Evp1i= ""z ϟܼyGM*v26a(,uR߻X'?sص %"&*/,čݻlCS@VBVlydR?[5kٳBϞ=1l0\zUhDZiӦ_#22=z;cir= ~; @YV&RJ_yq1nwsCmjA".Gf Љ~D`` qAܺu ͚5ԩS-v4"DDD5.;;GƀРADDDhnꎍC5٘aVݨ( 1) ps.Hp77?H)޽{#<<;v={9s栨HhDZj͛#G`ǎطo4hԐ^Xj'֜moS G{vAQX Rʺ4;7삯BfoGbѡKeLwto"R5h ⫯ªUиqc]*JhDc"".]`;v,]!CFLꏯ^l)[B\673 "P)Rʺ0%q{v%NXXz^i+RJqbڴiŀ0ek'N;X虔chٲ%JJJ ;}Z`cumN&8=k8ƞ|Xj-ɾ~7-p^Tcܦq}DJ;_l2x{{b""RRR>˗/G>}fxxxKT{1ral-zH=c6 cHxMܓTjCŋk^\~^^K){r_tn,RZ?`ƌy0}t[q#c(..Ə?ÇIk%dbЊp0_3fJd3-\;wHiuOqf&RN^7W]RRr^^}*}_=k꾲2,Z| j*tYXD5]@DD'ND߾}Ѻuk\|?=0؛;gM n&PJq5P ;kN2Xg]H,~M]lq,?sE\\Zj.]`РAHJJ;Q ""ݳ>EEEXj#U0eK(~9s \jA 51S`;p ]EFX *,c;5r%>,Vcж{ўGbʔ)HNNƼy0sLȸ ""PPPXn^z%Z NNNޑ?bgzUפc_uX::m[׫'BZ풟 QA~*6Ȥln`]sTZZ bêUо}{c= ""tԩ$.ZǏMO!v^2`H;Li5ne`8[ww8iS{{(- a瑛>MͶj5;x GkO{lv"${"##1ydaƌ?>LLLĎETXJ9kt ׯرtZY Qh툟FuAcO9}#ktv}Ӧ)ȹ(V>?U"0n /ćx[-SXv-fϞ gggO\$t Q^{ n?QFI Ƹ? 
-05Wy .'bᡋvm@jPJU^kא{%9}[{#j__Zchq7pedd`̙_1|pZ vvvޑHKQ `ٲe͛7qb;~-;z|RLݻ'OFEEVXC豰AYYY?~L KtwRrn&d@*>%퐟O>\b"z( ":fx뭷```M6{bG[/^D@@BBBto\KF׆_.*%nⷰȇ <<` sz=HR"?1EIP-h}].AD'ap\H>|p 9r7]pL4 Xz5,v$b""# f͚˗^ʕ+aeH͜9;wD\\\ cWS34ǐoȤ?Q37ҡ԰3`bkkze9(EqF:i(͇\yÿ*Tjq_ nwܯ+**¬YzjXt)lllĎET QW^y.]' c@Rƍ_|׆ƥaHJ%&wǘ agV8UPV73pF:ǥ!ma7T1j>GAnebr6RrQYU<ͷ}Tgk3Y|,vp1{ĦX3#9^k[{6G}G'Eګ~!/_aÆ *,@DDz֮]So'RWL0ϟGdd[7K@v|2_9 d!X² (*++@&̌agfL BҰ5vDBVQz6vŘ 1Lho*88ƍCYY~G 6LHD,@DD֭[1b_C ;R"\\\0e̛7]- csC&nzV5$Еd1)-Q=^k[#}jgHeeewm0n8,YbǢ:H۷F oߎ ιpڶmlٲƎ{95{/* oCV:wBa {>JRə8S73w E0KѥA=oA=SL~7mVHTG鉊 ̜9˗/Ǹql2N?>֯_DnǯFh\NLCxB*Tjʥgfhl _KwB}Gˇ>{QrǍe vRrp5=WBfd>DP}gtuFF`ndPAFFƍ}ww}CC>j HMMū0Z.,00mڴUj=K+\NATJne")J5P.l͌`kj CWP"DbrKP9\[sx[ݾmP\T%%֭[3f7oF ĎEu ;{,Tm۶]vbGRRRݻwcbǁZ_Bd" %,,CY JU*_ob &28&p0y{DAH:*&&obbb7`ڴibG:K鰟'OFnݰyfb۷oLMMѣG \Mb|Kp$ A]pYo???b٘>}:?rzj.I""10a~m|gؿ?ˏF׮]Wpr|"'"]bbbe˖a߾}8x jx'DDcн{wl۶ vٳۍNZ,44N]vSTݻEHE '|Yfaȑ(**; ""- DDDĉ3fؑ!222pUtI(ܹsP(n{7ӳJqqe˖|رHDZil C۶mŎDu8 T~T*\z۷oT:uꄋ/:t)v$a,@DDZG޽ǏYH._ wwwڊV$&&VY:T#pA;9r$̙J%v,A,@DDZdxcΝ055;=քkT*{?Z \.ǂ yfXzBFFرH@DDwVzgϞŶmЯ_?#LyUT())ElСغuɨ.ȑ#-[^;^GD$D!&&!!!,?:*55;Jdܐ"R"I&a3gWGb""Ill,tBcǎEbGuWWW$''L `ڵ0rH """ХK̙3_ؑ\z;ܐ(~m=z'N@ǎ/v$R,@DDСCҥ :ucǎAbb"Ď!:777TTT ==](TG̙3@6mp #b""E;vA0`l۶ FFFbGc x'O?;v Q-1rH5 [lؑqwk =1b1˖-;i "Zd7>֮] L&v$Aprr;茌DZ7n?ӧ߆R;i>93g.\ `bǡ //666b \ ʹi7|YYYزe LMMŎE" "DL6 ~-VX`mm-v 3@u }ĉ޽;222ĎD"b""zA3|r,[ ؑ9)//Gii)Ď\]]yRtt׮];9l2lذq9*--^RuZM4'`ll=zʕ+bG"հiӦaغu+ƌ#vz***?[[[<== OOOtbGZDDTO+Wb6lqQ$IT]V.H0s'~(|9I&!..̙S}Ŧ-Y&GYYΝ ___bѣܹ3vZ@a""!w/{[~=F!v%wխͥAzr@PDu `ѢEO͛7ݻh" %%| كMȑ#_`Ai m49>S|W;v, pzΝ;sϜ&?İa0h _ej'|%Kয়~ѣŎC3Au-T 6mZ/"ѯ_?|"M[nL<ӧSZ]jL&? 
+++1ׯ "g_/ĺu[oj1@PD;}RM;u\9KJJp>SNDB"`ɒ%066رcQ^^Nj# "g_O?Ś5k0n8udffZh\ߧ375M"`…044ĤIR0i$cs{ʕ+1w\|mafdd\كn~+WЯ_?XZZ @LLcIKKĉCCCaҤIHOOÇc1Zj?FGG^90d$&&Vyݣ (Ǿ1_pgU}Ι3h"/_)S`bǡE "'o DŎBZQXb PݷG;v( ÇgggFu#sm]pqq9"TSHKKr^zI޽{ h...N8'}VLذa?СCœ9sl/ѣGWLLL>衯{?kxk|ϻ<=NG[ ŋ,AzH;∈ԩSaرZ;i; SN՘=ԹdP^G^z!%%c>s(J`gg1wrrh޼0w܇pۣG*?\\R}=̓Ayvm.@pI$aɒ%bG{Sll,nݺa:q=;ww'A5kcdddTGH$﬍VVC_(,,|hٳg,YBJ^%''_~1c͛c&}zOG1}JE IDATt,]T8TCX۷ /QFؾ}; ĎDZ ?Xa:::jc#F7}GQ.\?MtHB C. R (]Z^ 6@TDP!$RH%Mvw?bCH(GygvrȦw1e$$$U/عs'ͥDz 1'''ŋy;0˘1cUo8J=^*-U:uQ^^~ϼU@1V\YZƸq0eʔajhH;#?qAIy+++(899 qqqR!=w^]=3_|Q^Z}qzm~G!;vL(,,<(8;;{U4SW1\UjE7xCŋzXѪb‰'׺qoh___СCZy'Sןku7uߐ¥" j"55SNrzx̚5 /x P=z_~:dXOؽ{7V\)u 5+2e zjP5qDܹ.DTixo6k 5+]7BTJI&!'':$Xny0qD<ӈ:}KiF8Ԅy{{#00Q$DtL& &pQ=DDBvv6N~ӦM:5'NoF#uF 6m,--?#66 ,:NǟND,̙3Ƽ5 & ++ G:J QM~zl޼Y8 5y6l]yfX[[K ???ׯ:J+//1ΓO> `:NDDMZrr2/^ O>Rǡffƌߐ'uFU\\ c饏?[ԩSQ^^.uf,A0c cŊRǡf'\.ǶmۤҨJJJ`nn.u "dbb-[ڵkXtq% "jk;v 7n8$ kkk77n:J*)) ]kk֬Ja"&)66*^yJ3fĉ|bܹs1yd̘1YYYRiVXs΅-[&uj RGi4<~֮] ###̙3G( 59?#9 &&&RǡfNP`ܹ믡V(x Q`صk~'4,@DԤdeeaѢE?>+u"cǎRGi\h"HKK:NDDMoccc,_\(D՜1zh]V(5[[[RGiX8q֭[+WJ8D"DZcHKK믿ggg\vMtNCii)g~-v؁۷Kɓ HAiZt8vd2ԑDAwlڴ h}vL0Aℕt5r-QZ[7l4ˡ2UH! lM`gaK%aii={`ȑK ϬY 22-Z:Ne$u"oAxx8Ο?Czijزe J%4 @T"%%Qht:D#"5Q?}\Bw+%܃Fk6 G{[tpO#իWcxW ,X`:v(ujU/adTkz\7o>181i8i(蠐eBVI]}fk;[:fuDB)olfbx &3\DvquD_'qTHDk׮رc1qD;VHMO#"pBl۶ QQQʧ~^z Z} O 6<[PVW7" {#p3Fr9:ۡ:١=9ѾW\N,Լ(2th12]= -`ܸq@dd$LMMA@`` ֮]ٳgKH$%%AAAHKKCyyy <!!!ʰb<7 03z8D?f\ q,&^MŁ+H+(AKzaRW leDcɒ%x7A9r$p)\ؒOU yԷ۵n >Z d4EX C` I|,.%ebv_?tv:"$/_{W\q "2X{ŨQpqINIII Bzzz% >^tz>:zB#Tڣtl,OJ^1UB.DVe.V#X ]/L Y-U&p2m|]tz>ŦSш(@wOl3%/]棼ԩm&u& NC.]кukb ddd(A2 %%%< X/ 'b¼ٻ-!'cq*.gܢE F XXF&PĤΕu 4j542h@i1().@.5zأG :L{>! 
ͧC!٠xmxgpE9jv܉ AdAZ~=yDEE2111 Bnnn2UZ>w8CR )|T2\K+)8uIP(tpf0 CFբy9(Ʌ:7LBPg <nF8}59 7GA'x~@;{x'8ŏA1IIItRJ`Xu?~<>s5Htt4郼0K+d4EHi`Ic47 (q23S}1+-{]#@xo\7ϕZ`n݊hHAΆ/,X{O8DP ht:^ֳu7 pj2̭`{02igf H Ǣu.~W;ͧcT|9/m-95ѣb q  4`ҥxץCP{Ǎַ&,Zϟg_7wǥLX;;áCxzu%C)-C+ȹ M@ZԺˉxc(-bդ^xWf:u*V^-uDDc…ؾ};bcc\0!YQN2= ׺ߑT8[O8u ft";:/BSV h7Gρ Wqgqlj2֬Y%Kps(T7 "2)))hժ>,^X8DBÛ Ä@o|3?,j.TQW~=]a7` =`ٌO}iȼrYaaa.X0(֕^K3C6tE$ EYYZnqqDDᥗ^¶m8C/% 9drfOi} =zCI[Q;K|T kW2t\N1].<% _|ŋڵk:Ab"" ___;I_;5=g)hٵ;ZCv6W充H=}9qx6'Y>?B16RDGyy90d|7R1H,@D/^[r WG`'16K -~ \҂׮G^ 8k#6PlЉikn]/ o + =k׮K/׽T>ՎZvv6<==o^:Qz  LJk;8 +<%;b ;__i0ɡ'}= ;`0Ut3:0m$QZVgy+W:a""tR] 07dX<_Iƙ0O}8NEKcK޿A"1rĎyCjfXZu'<g $JJ`/_˗#))k TLDz_|^x28exxLB^Uk0ua,$:u,?/L ~;~xCLim0m!|y4A~zŲe˖I6_~%BBB?L8DYXkvf~)~u D0pπp˓=4F&&m%5:u>P&p4łO@mL6333~/9K7GDz0a>S[nWAVQ.-ŧ%gcDa ϡaSWH$ Ŕ>5p,,QJsua֭x'c0XH/mڴ gFll,W!Q`'{W_/{hk2 K6W! +S}WOٓ}$?cǎEvv6N8!uDDz)00BT/j }ZY~Xۊr&~Vp׏id9Hؿmlh$,LDۿ8?WO~$jÇ#88իq  靪ϟG.]CtO:AoPT 2\Dۿ?Y? NpMTQZ={H#GBgr_v0#8LAt ظqq ~""j* 8 ƿc5gc1뇿2ˏĔff15{QX*S lR5L&m6I ^Ÿ J^V\ƚpl _GѶ?.'`چh<5E/(-,3f4n O䕔t? lァALFD)駟/'1 ,@DW xyyaرRG!CRlLWLm;_M[d'J3s|5%m *SL6eZ Տ5Ǝ 6H _P(R!<_ֳ5%>]3<%c<+oox ?z>#G|L [DۘcH)s0%Q͜9NիWXHolذ:gϖ: ]5Z{QmfP聙ZåI78zQ IJ|:u*̤CTb/tSmn?r宛S[[8ue\ ѶϞԼ|?LtDc׮]RG[,@D$'N ::f͒: QVuX5I|C#ѩU \t9@Y?CVW=agGu}S$aB7nv S9 "Ԇ ХKJV7 |Egl7XT+0}߰o+ C'} DgcžKM VxS#q!11/_:^b""a֭x礎BT7~;VxaP;{/" $JF-:w^BLFAB.Úɽ|^)aBunnnعsQ Ifh4\ֹLl=#_3_ح+--%LHSPZYb֓~ 8b IDAT aDH&aԨQ,@D$7bȑ: Qy}|0h|Pؠe@iQ)p#gDhG{t| Q:=z4.\{̰$q̘1C(D:}IX1hdHs d\trrJ B'7{L%3H_CTȑ#RG;,@D$-[#G: QvF_'T..%ܣ u#qfv\(QcǎIE$~GL4 JR(D5QX:Z|c.L8w)Q2֮󢛠vp@+~Ղ݅ pUL>](Dxej~c:A` S[[ ӑ;wFzA)֟X 'dխ_~@vvQ 5[={]t?qYR< HBTZ.ZU͒ڴan][;c_#]߾}!p{܌Q?|I^@Nz難he'?w 0O9)9z.V408"YP#aggv4;Q:y$OJFu'a^v06+T\BcЁ^7g&*l0z&Y6:XQmݺ۷GǎBTî䖨\/*gghѡ"S^=&03 -M .T(zN1i$j( wEXN]&#}aU3 "$JFT^z/_:`"FsIbʔ)RG!! 
]IAٟ@aתDHش/WR^= [;31&#BUXQj~1FK4d4!=|DS{®5%#I.]v:`"F!oߎKVaT*c ֭%LFVtZ<3!F&Jv۷GDD1 5s!99RNG0h10Edl}[XMW9Xb@g~tjѾ}{݆o&E&A!=VP+xΞ3< W#7o";;[(zů\D`abT=Q 6l5@_-]!7Li S϶x l`|o?3_d[ԥ9w2:So(,[phoA[+ %,;Ӭ7LZ+.:U__ .AAPo߾06"AU֭[\"ﯿ[qC޽2D~W.v5fj9_ ~.]T ͛[uú\S ٚ&!|ޛܿ~CxUģ/?C=ڢ|dz<{XOe4*[T>YTƲo\^]T0k9D??~\dCRA&Ȩrۯ-,hο ygddTU''{!~ʨ*ufAyH.V'X vOY;!)%ї!%|p zT>]4r(O !H߼ N3_opFV ;>)#׍Be'55:L8@ /$ƍHMME>}$0yd,_SLABBBu,zq^Ul=e˖U@7ҐjHv}alT;N߱`<>!YhQXD$,uo'#mrpkjz y@Td!{Fl+,:>SUwC hu0ʪkH?C RݻK4 ju6lnw jC3㇄ĉ@5=zT!077TTT50V(jTu͆k6w<#/r9,gV@鍫U>^Us/4U@i?|WO}Y*8+mHFѩZDɗ[DߑDЄcǎ033,C~˗/G^^rrrdɒZ]llllСC(**ɓ'|C>~aa!:7xXl(K@@.]o(**1c Q|Ncǎ3g?@PPCͮ/L5V2SV3k6ޛҸHjT#mJuXV>iC[MAR!O:-|dyˠP Qxt%E(tYۿ~dQi07qHC*o6mӱn:8::bֲ_ǏSN;v,bڵey*8}4ƌӧӧOǘ1cpXhVZxyyaXnh|N>CN0l0|'XjC^1G1{jNm 1/*DNh_ß~wGNGu9`p빯;2+ {7qAua޺b\q%YtF]b5̔F,@DzF&DdJJJ`ccSN:= O`n̏ @5('l*2Sqy+ZӁ7Gq{vc >}kU!&$I+0-o8KER  .B [=r Vf&Д:zxwA#+x6HHE.h,H]uFDP899[(DRu[ft7G+sTKJ\ɱЕA5(,2wM]\gksXVQ,Y =ի12]$oBuaD6_@aBs}pi | zoizI^(+-O hFv!<,%JEDu1:5 'OŋAToJq#[\v|T8~5ST͏`z zD{[odoVLD3@Dnܸt^C^qDygE  "}DDԩS066FnݤB ^*eNJK)殺IN ['֤@֘""ӇݏW[\NupH PYN:uSrJ6d[_OD?X聝>}ZtcM"C9E+uCTgksZ4GNnwXXTDT "z /_FݥB` @xtrGivDȐ**PNntt>b"r5"00P(D fc ; \LP&Q*2$E :k-^bRVRDDHXXLLL/uɀ>N8#.;A(/@EiDP^'[7A,,õ2{# "z aaaԩ1[/H+Arnq1z@ArH4vnиtqDzȥKXȠn S!WSDc(AYnDH_Ƣ-ڻV5vxK[XX&cRWz>ZP= *sI P)wWhY ݗDEEqI]}VPc sA^5pGSsRV4|:E "DDʕ+j,@ds ~:+gv_? JQQ^² 쾜'rH߱} |}}3?²1+qBNTH_T /) o9s:A3H",@Dt_"##?Fޫ54:]^(=pPbrW?GaB7͌%JFDW.Dt_жm{Hd@-L1' {+dFDH􁶼WA06rr%dbfg   j^q8/'A{=uHJYQנ'_s2mxS"DD V^^x jqFOhh|N_X(~DHJ:a1-LǓrt oX'd$zc"Fa&A+,7 TJ53"QQ\,a:BFDd 5hOv0D,** rm|wjt󅳵9V$߿L~@͉x?,oőH80fJ# QCQEEE fffRG!z$ 9_CT,Ltd \@„Ԙ/]N׆ww &F ,AdDt?X53zAxEڣ5RCC%JFIanh5WGaai0!5 5صkXSexl "SsE㫟艜޼)aBj 7ϝk'hTJS4r5F${@$ "j(](Dfވ.e=pME4d_ƚIlm^=^R-07sHQ\~OjFTX>s9Fr9v]aRϞ0!=,br7_'ڶ3((+WO(= ,@D :Q׀fyAm-ٔ›&!qXœ}DgodW=D7C%"DD OOOc5:\u huf IDAT,'>nF6ڣDyqD AeG /.¤zX0KY!"2<,@D ,@ԜupeGh\h۷nF#QB_IH>Փzo+'Ѷ>rl KdXj5XYv 3ه"uER-(@&R" ~?ϟ*FQ%%%AӱQE¶O0{Q6?'Y0yq1H={N2ك*l+,`v_?<х>5,@DTo D͒Vʕ+Ѯ];b'B<>>&o`|6%i. 
'&FTVCk0SUo+Vkd/0%=lFޅRBB,,,ТE 5h̜9ϟ,^J2Yq]72;.@46 ATd W >"[2J/&jJMDCaʕܹ3j5Ν;^{ J֨@pQ z=uRħ:!!(cAW;.SsЪD)Qa"z pԜ\~ҥK;СhLs5}M)}p0)) EaR"?%aLG#jXX9ԩʪg}j?kTCL:%巖d4s B}OL !?6??CDۏ]Oüǰ(CԄQQSw;#SN՘9-ixPUo3˱A޳5:$ '%`#02@|:,|vĪI$JIDE"99M6{űc:S?'[8'cCmr Ǣq~/mY: ;6lDQA&9M7hzр%Fb;+`Dz_,;W: rgfg.)s#QsSܿ{@c+r1}I_ vJԦ"wYQs-;O?4.\sΡo߾M:5"ž$+ Hϧ 8APq4j%H bؔ|Y#Q;O95Hrr2ѱ=4Cii),YΝ;W\|TX!'>liD7X0 7qs~(JJ|T$ص]- 5S~"1 C>ݍ88Q޽{H$; c;uok׮Çlbȗ៻9A=]8&%Hc'@v<0OGymzX<3E]'j95Xyq1nߏ v~=Zq~:c܇>n6$v5Hzz:d2Y;۷ {Ů]}bgS'C"RU[D_;Q(Q}wX@,Ee ;cX›8z&Ƣd7sZB9U矻9W(d2|EHJD=@D iii\4͛71n8꫘={6oъJ xL/:o8N߀{p'HeC=|>m1 %m+92RL$1WV⇈XQäֶDTZZ{ݺuCJJ ?5kDhUfS'#G^^^mH tESB~ mي˗!TA_V1t Th)()uaØ3G^G #R:8" \ƍl2axg{8ZeSo0C>Pef=-:f?{㧐 kް6i#5؛˹1_/ákwGQXR?I=g"D.IDuɁO9R8D*]^xFhh(VZQ hqT(|<3׸_F}|?X}hIaݺA1ܪHxp2'S! Z5T>yex;y 9p0gQK8wQD "WZZhM%}%%%Xr%VZ777_:tرm+wc0C;}D*u01SAXs9` 6]!51 H˗ +S|8svng~~JAk-$DD:|0 kkN &۷/c…;c;z^rxk5XW?vEaKoo::B,ˑW /(Dw^>n֪y߅,$bnA^pr?X^1b"zAWWVVVbGvΝ;xg?@ǎŎlx[AUwYl8`oHuT[S+&ñOhnn0utz/-(@ޭ(} 46^2Z_w##=Mg0ν5=EDSoE"R С$l5R|GO`ccݻwL[ t>qE,y.aYuVhi!+{";.B߉8~ `bgg::@jf&һt'%8 s`nn=n+&~5.@D5DDɁ1u|MÙ3g+vVgkj/b^P,>"/`NO,=klژcvRؔHLt~7r R}}CRSHM})#B"@ %r( (JK(-r硢B ]mt.8;e7*S(-|s2HCOG+l;t`"" "Wnn.,,,ĎADff&~m_;w!> ꏷ|+5|{ boLL҂5|n,}1% B$$g"9劊&e47҇ @DT/@ӻwo:u ;V`mxP(c7pf>y),:mLkYH(@Bf>n9n J ^2s˰hXW lLfŋӧ79QMXQJ% 52[***_g-- $àN2 Cdee^e|ԁDxښ An*rKSԼbdGfa #~JJE5}!;uLu->l p01toꐐ<3ʂca,N9996lG}t̟?K,L&;=AOOGj˳7cƴΒAAAXhn݊E9}SĈN!p(x??~<zj?Hx05$722¤Ij$=@DT=@qqqoaǢE`kk+v4z sBBB0~xӳUMDm{N999҂QHMr[Dq|gHJJ» jhѐdؼys6@DT|A1Nm\.Ƕm0c d2ҥ N<8̟?oiC<(믿BV=7]!:ظO#11| accP믿Fjj*~' 0@<ӧOm "\.QHK.ÈBTT`ff#F`ڵ2e 熵b } |X.5 @DT'\vB.ԩS8z(9TTTwޘ9s&FXC*͚5 k֬.Z"j;[TTT6J.#::GűcpYÆ âE0rH.N9s`刊„ D@Dm "{ApyDDD`Ϟ=x"J%|}}1rH,] QI͈9\\\0p@" VQQÇcؿ?abb<3f \]]ŎIjN!pW^yjeJ"*pDT'iǏcѢEѣq L6 Ν;1|? b`;vQ9O?᧟~BBBl2$DADՉ=@%** Xj^ubΜ9;aRVEllQHC":R)))0aЧO$&&bŊ000;ֆ C +ŎBDJo2"R[P(JbGi׶n ___\x{ƍagg'v,jG ZD"̙3 bDDUEPPf̘q!..ƍ;CTٳ#G4 "UYYm.]}㈈/ssscQ;U^^VbD$,V{Zח_~'x:uB\\&L v$jԭxQ\\,v"0,V,ZWyy9-ZW^y/2c]̜9rbG!" 
j9@'77GDDV\vqK{XQ8u\|{ݻwq9?^HD*ԱDFF"++K(DAXQ8EDDpssɓ'*H$UI]r-u=qA*b֭5nzH$M>R/GG:%KWl꒥)9JJJo:::>PcDDbԲ>cL2sATT,,,Z܂ ڹ.9ڊdddI&ջ ~is`ݺu{n\|B]4%`޼y(((@TTT=x` qN8q`ՊC1ydlݺ`CC{tttҤ=` @DT!p_Mc8t^MmH+W0vXǏիW5-- χ#  ==ھĤI`aa}}}b˖-57naff)S ))W^Aǎ+++ 8:Μ9SxX]KJJrJ‚ }322W]O++6?۷#((EEE \xQzؽ{2Y^**wbu IDATbbmP \%!<݌ʬyӧ㣏>Bpp0ܹS+o|=n^^^&H0uTѣG1f$%%aܹvlmmcߜrI6l(u^a!!!صkW>'GҰal۶ 0`@+$iLG"ZU2apo܄Ԧ.\P}<ĉ>kV[yGiH$HIIhiiaUJ|t\ Qm۩SЯ_&;yd@ttt};7i溞 U  رcGO{4&Q5<䎈4HDD@bGUmGS̙3G B^^pժ5}}Wǎ C vvv ^r%Wo<ƣBUtR!77WήZ &&& UXX(8qB2dHI3FJJJ4aҥjs7srssnݺ &&&?P[dd!>>BǎMmLa֬Y`dd$L8QHJJ/ʶ[n &LLLL###! @rJvS4|^{ڒٳ:zzzBn݄[zܸ8! @022ѣG /_u?~\xꩧWWWAWWW033z)| ^B:w, VVVѣGVј}srrW_}Upsstuu[[[aĉ©Sk渞 5aaΜ9v ZZZ*?|!,,4؍9^5S1l+X= $EDD`ҤI>Ŏ 1rHdddرcptt%GPE~Ss;v,?^dXti7>-睚bҥ8pΝ;'vQ^"  DRRQK҄U*IRL6QE^\f"RO,V xqQѣy57MYҐ!C7|Se)O?ϽrjK77~މ "փR)r|rlܸ[l#Ď7\Ծi8AQ0sLB8͎wjU_B!rW_dž (viC`HMMő#GĎBDjՊPu;v+˗#$$D8DBӆ7|}}&v"R3,VC^X=pIbX|qZ&Cؾ};ŎBDjժ򆧼\$⋏G`` FJ8DJS 3gB.#""B(DFXQ8L:ظq#ŎDԪ5jJvvv3H "=xO>$ SSS#:M DVVQHM"Zqkĉؽ{7d2qDPPPR)n*v"R,V}w}5k`Æ ;h4u!00Q@DT<x饗l2>^(DXQ$̚5 F;#v"iz4j(d2lڴI(D4ی]NNnݺBB"""pyr"m%%% 6m߈C FXX/_D"v$" "2o<ڵZmmmH$( tҥ@í[pYH-hz`ܗ_~ӧOb!"qU VRrAGG"kyWo͛7U8DjATBTj|Էo_xyyq1"bDD T*s2L:hxw0j(EP4y\l޼eeebG!""*Ƙ1cF7:mT-/%%2e -[&v"Rٳ(Xg}U\BY`nn~іz\\\pXãBsa077;iK=@apvB~~QH$, Ը *Bk.\֭CnĎC*{J AcHXQ5O=THWW&MҸ $&&VksΝ~.M8@@@W#jXQ5VVV4i BɓEL4aaakRٳ5kֈH!ph$''Djb:::;v&""_~%zXX/^o}}}#6&LSSSl޼Y(D$@DTQF q˱~(ʪp>>>駟CDj TiӦq58vHKK ͫo111(..wEEJ%n޼"# mq`5XƊZY6#f5||acFF>rKQpJ%>M]GK &іP C),0W㨨(HҪ?***ݻcժUx饗Z!p0d!,, V;mFDMP*q=-q9$f@ǡ3J&WW[ .VdcwSt`` a3 U{VT/ \e˖h"MD"̙3/࣏>ؑ"j2 pf'̭L\MEB m- \LЩ)|1擵1ENzu j=ny9RVobf2 p.)[ND`eg+ t :Dyn233qҥ:ӃL&ZK[MƘx|}}]m`o< ՏuYWrrg}_|ZZ|QCUj=@vvvGXX v#/DbܬMƘakڰy-i8;)R[`mm@ZR}蘘AFfRiF+ "W Jb-- l }m0]!թ}&O[3w/;dcW'S;3{DFFBOOeeeс=mۆ~5 bCzz:lmmŎCD-L"4,8R >ծw# ]~ւ5t-``a K蛛Ch(+*PVPy9\feB^P]mmrXo{vЩ&%W/^R.xsLOX8}4$ &NuʪasDT~ 3f̀R;Jd裏 /,]sĎ"*@Dܝ,w4Fy;yrl>p)9H`j'e2v8r9PvdHqc:AOb  ,OBիWcnڞp̝;eeeBCCSNŰzC4Drno?-g0 6Xqg;VO^DT6=ҨQ ɰi&XB8DԂXo-'P*m >QA^Qpj MMaV^#UeBwC @+xui,FtWYx?/%;Oc=' c j/˗׺ i>KJ0g_x?1-.IYc/. 
>Ĩ{q6G;8={jt(#p: fk|u,.om‚HɕWcq19>SGi/CĈZPF# /PO^ @@Eǧ͝pq)0>G1Ї]o_zDv|<6u'c.x+6&E߄;#56^爈j^zO>ڵ+1`Q a(P1( Åj,~2  lnhs` :.Ia `O@Ƿ?cQP]@W+CjZ.}"%',3f`m~;'Ov#C>,TWi_ҝge6JC1c>a"Ca۽;gث 7نWR=6^<գ@gFnn.ĎBD-Ȏ';QXRsoM3s"1 ݎ/@ֿ?:= 3gҪ/m]=8.Ӧ cڇ7E}տZcz@ `ǎbG!Klyf$GW:2|i`.NX -Ճ˰ap_Gvz_t q a_Ŧ3 "%RO @7nQ,ZQAI~pާ2f^r={ 3;;<!\?;/VnmG]0"NP"5T^^ޮU Att4ŎBD͌Q+,,O#p'']Nv܅}'7# :MW7}[e8-_L53–XqNDj= &7o; 53@D c܇Rzu<}9R53UR aaabG!f(K4!bX8Y.fxLС| ,,8 rRr* e>Xq"4OuH$̜9ᨨDxEB~{EcReۖGС[w#RB] ^dW~`_@OxEJI$\"::Z(DLX⋃ n-"BnпH aFF8qzU}2<1j^$dHyx{{ח!!,kxsiS}Tu=Agg8>G_1k"Q\ڦ%ƹacCahs*`(..; 5@DfVO>ULLxP#""B(D X5REfx23᧲NvW .Æ|73۸gÎls2sb G \ `DL0fL@\ ŏ3С5|u8Neۤ.Xl=>RHH"##.v"zL,šX{j(:ۚl{cY1c-ˢcGWi*ޝ:bq\Qz`ԩǶmĎBDcJ/y`VNU]z6#GBjf*RBj*;_b7Q]Xծ%y(Bz},~FĄD-C022B`` W#jX=;NCW[ _9;@&""BOsRwQG #||}nrH ],Nj ǰ$l8 ѿ K1廃0woG uQqV>Ae Ą_Q6) FLM6 &*hqqخN*^tyrmd}bEym ?cU%,KGG3f@XXzH"jE MS}it|*~w'QU{Qi9f? WWXw1!5;NC#>#+#/lzmTۘyCDm U}A׮]cDKw) H/,À"%gb ^H(j֒MqOĄD͏s1c6oތ2Q"jw2l">:Z~nd?caۧ7ELH-I֣tM֓*^wP[fϞ\DEEQ#wYU``nݺZD[  r%l[N@D5?@չaaabG!&`D񩈼Sȸ$˾u2,:?Ũ, {cgTD8fؽ{7ĎBDsC<.Si_YXiT IDAT:^dí8\nBL> `ǎbG!FbD@gngp2a ۻϩ<%V[)H) `T 7i @D IT,vCURlYXCBt$^^pN\Wi_:dе"%#jX&$$HNN; 5 Uo?T%zZ.:ĤgdKO|u v;|?"#z|lĉ͛ŎBD~8z xҷJa[|p7GgU_ YP+MOOAAAذaQXգTQO\]G&fN%;nϤ&&0wsŇT3F5Ѳ=._HH\K.Q=v_R0HH]twqfzUD_N]WY*H4 z?y#haTՖ#/ogo²KW0ik|wJ(˜ Ms'H0sLX!^U{6I тeN"%#ucD^qYU)xaә55Lhh(RSS-v"j@Du~& tU0sso ~J~K"%#j:kooo",,L(D,M }]|\LʄHH0uvӪ=S}PPb$5 Q\\,v" ZKq4> An*NLEJF½zᏋ FDp!!!>v-v" ZD^Nc:z&ƮnD"R2RWNnrPr58,p:t?W#,j?.:`$/ HLσhH}iik b勤R)R2 Add$ߙDJA@dҾ/. 
R}=d"%#ugc PXR^ik֦\4 ƙ:u*m6QXrj.J0A}_\  C53utBET^zj-"RO8FFF jpDjwqD58v# fzhUV^`X+1Ї%]>DI9Hs/44OŎBD`DTS7[h=,/+HqzJ@w[**p.Hsp\5 2 7o; ՂQ bn*m'ӡo}SsR0Jj<֦`b"&#jrH$kmmm̘1aaaA8DT@D+.Cbf@'nà-կ2Jo[H$@?7%b2)//g!!!HLLDLLQ,q)%?pv llDJED&FՆt¥lR5B`}A׮]L "5waeG 2ҪWK RrTں;X">= !(//&9s&6oތ2#X="n.9X@+@0z8ZuBĵ}:A;ĎBDaD{(PVJ*1 j8}V+v\,Mp'w5_K"$X%vP,[*W[V͢fmH!2d99NEsr$>Ms\}ݏJEЋqrrO abbRG> MOV=65rvE]">)]Fa]Ύl*m ΄(d ܋ DEE)]O% 񐤬\*iOںKʔWFGG`fm\6rV?gBt2z聓WV!!g$ 񐤬\,Lq,lU*`&-;JeG.0~$8YPEB< @/Ғt)B$Cp/5R*a̬I P;+2)2a $A@@aaa\pARHBGNK3-DJ?w{qgXs9{y&?s<)|8ƚKs{8BOҷKn}2ꑶλq 9۟'ύ- y>*)N/n:mhR+TOOR=@zЮ];<<9;CP(,*`j[{WT?T @% 5y9$ >|P3ɿsк+(״]@^ ~R/5S3aWJO.Ef͚Ejj*ͣcǎӾ}{ƍW3g֭[̙3]R\9ڵkǂ q_~g}Vc񷠣R/ -UkU:^9/e_H9EGԙi$nXR؞ºV¶#(u>5k@m/\Oq215- ݟa̗o{ArJK?;ym_]}:>W_W~73Pg6&C7 ҒtUݽyEWT2N'aB\ ͉_5~(HN 䵪Ei:M]HүfΜɷ~/@޽ @Ŋu%${Z'?}%BW_x#K߳U==%=*/3mPzGeʰ:mfsS{~p>z^@_9yW~JZHй% \2:uv }tO8Qs{؎9BO~~>ϒT*qv~Ew`!ben\6h䞍rM3R)HO&E[u,l<x4͍ٙYoRm:i @֥Sކ]6O]c7r$/*FM~3v6V5p{Rm - uviG7μ$|L a `׮]+](f͚#ӦMcdffr1>"ϟ={6ua„ lذ$222?x77G>s:um۶ѦM$ ';KRuڜ{:7KJA8 Յ_Q?sޑį5YUoI,ƦnMJM~LLp>`6;`I*tL̢ VK}ꐸG<>[}W_B rdgӖ a ׯ֬_^RDVV-iҤ zJ*̞=%KlVx9y$?J*ԩSիW+yxb4iB׮]Yp!_sw&Zc[b=6=EZoG'OO+?Ȓ‚D P2!NǎW·"ʵkHC>}:{᯿RE q߷IYf{LMLp hxM^.tڒ2 ΄( ɓ+]e !r[br7[TƵP^^V6Unu{73$ F.]puue͚5J"D$HTw* JiSɞ HOnN.*LʠFr U%ij{ ̌ArJyƜ !bma-7tNh$q/D{8;Lʠ}QQ8 $22RBJ ;O$^^f:&&hZn%eR p|||bʕJ"D#HlDݰӨj)1rV;ˇbS+P?2+$DIUPP K h]', @BMjN$鴽T9)J$Pnr2/$& ?OBt2dXk.KL$4qsvr&Rr$IO %&n @ɸ9Rx6 a˪U.E2EحZ-膝&՜IPU˜hT*2ShLc7#??_uVRSS.E2CX vV ;q%7>N1LH@ЮN"!J2YgxhZ6mڤt)BxHAAZi#tNO2Qd+T0Yqq9!\Kmm+H2<'''w. 
Q$2/;;͛7oBΝIp:絬YTxy716\s%O~~>rϚp!.E2A(bbbXh8880h Y` زش\S qħf%ң J<;tk~zU_III|WԫW.]/۷3gC:b_7Pk״t-ׯ cjȸq :â+PKFGêU.ERM0'OdԨQwy5ke^ʬYhРSʰaèZ*9ի;v{=q/ԄѾuI V}IuӾtAc Hlܸly"Hhiii|wx{{Ӻuk.^ܹss6lW^|!郷7O>!""sRNGoXʼnUYsZQ"]Z''%N9޽',xf\`̙lْ`O Px?JHH࣏>ݝ?\~kҺugΚ9Xw5Hx:?˗WFX?̙[ o#?88kkk֭[t)BJ"Z-v^aÆܹ?hV\I۶mK.1l0 ?Ν;|T^ǯs3АkW)}~31/B*Sad rӧ'H*99y鉟ٳ7nRbg3444i±cKxx8'O\rzUhQ/v_i; sχea\4$]al8YGddtm d 8y$rz'H˗/3vXݙ;w.\z~ڵk^pƍ_^lpIm|K>(.^D#f:%Zx>J!J>Y.]ʚ5k.ERG0/2tP7n̎;1c׮]cܹԮ]<{,]tˋx:ĹsLϯ=Rޖ봏kkKCd,QT$] e|T,`'9+o^b⫍0WB!^,S bʕ9!L0'OҧO7nLHHAAAܸqӧ?27sѳgO7oJȑ# ^|9 0DLMLX߆WcqαAN6wΞUD IDAT:aHC7/7cS7»3#S:!KP,B$gw^Zj 6ҥK]FP'>>>̜9aÆq%ƌSb|xлIMn<ٞ*mգMM1s,f|ѯN1x9}ZbZº/"??_lݺTy_B/$r|}}ڵ+\p ׯBFDDлwov튗|8;;rÙ?en$3otl3R"N !,К*m ۳ yе? qc΅/NG׮]cȐ!h4˺u^x'11cҠA޽˙3gpwwSQўg\nnjʦqd{ })}7䭶Hv U'aH*9޽,BO$"1l0ׯOLL رct-((`ѢEԯ_;wzjSWhؕGPkqZ[Uĵꭎq$,ߟ KI=zVZ1rHGdd$'O_PPkfl޼奇K_m+u0b!2Tf6#tP< KL{oUܖJMNJ@¸pҥa$aqqq5ciiYh^v` C1rEz쩇K&SVx\F9VՑ퓺z=gP<\nlK.6Fw'21+2ʷ>o4\BTʕ+.E& g֬Yxzzr vMpp0-Zxj5 .EDGG{n-Z*/٪9ڱ~L6{t{nUlC$GD(TxZaof.X+w{hwC^8!J2*yLLL2dWFV+]FKPsڷoλӧܹ^v۷?dҤIocJ*{3i#tkߐysAnVBOZ-CȞw^kJ$hD,ק($LrKh;xرciڴ)*T|'ؼpjyѸqc 8<}^6Fuo덪w"uے)q=ݖThZnKדU9_ Z#?veT*YWկ_y&/@PDzعs'[laǎԪ{\/3gd֬Y;v ocejbo;Sץ<]m'!#GZ1E-nKFLBUiZvƻ}pw ^äޯb 4B QSy]" y?j𘩉 ?m^>' !tH*E~Gׯϝ;w8w3gۛWvv6Æ ϏAq97oKf5*mkxKR? 21^fbO"QZr҈ܶ'm.'d:U0so 4}:K=U{=r :{1B,+틵5֭S!R 66???~mLɓ'[hт۷yf-Z/ q8KE;Sc#6B\߱\*-nږxU>u]z42_m { ɓ'JBoQV-MFjjA_$dO> N mڴ ooo"##9z(f>EѪU+8w}[e{EvGhL2}!#W7nRS{cNɾ{WJK)gܽkqT,^]tW5:ae=zbŊFRW_}EIJJK8.00'Ot)B @F*##1coУGN>MVNN#Gdʔ)=}QF'_({Eߓir+1Y:UܿҢWl!!"hәxjlI|3-K$ܬ:AE;ԃc;~,,,8q755EuQdܹ3^ZR0*%i[la,[ =֭[￳h"YװǧZ Ѻ3v׳wN]rrӛx۷i38>7ocV 8_3<MW񸉉 &Mb֭ 0ZJ>333̪Uj2! #h"Zhaaa[oٲƍcnnNHHzk➪~^Uh֞9nfjŒכr|joquz]SZTܽ@\?+霓bҽgY:f}WT<9s1y΄($"##9~ҥa4$4{ٳٱcO)i4MF߾}0`fomI~{3~ؕGtܽx:8::RJ!$d3! #B˖-9~8'SN7iiiݛ ߳tRYVLLL`j7o~ف'iJ 82)7#)Ԫ|.c7eHzUU'#b+l 7j<~Siݺ5 6333Yf 'O.oNc&3@c]|y_iH*~'ZjEJ8s :tk7oW_ĉر1ct}H ,hŏ`^dqeo$^V- \wX9>? 
]vgu8~Oܜ{_h4صk̜97x^/MH2k.K(H*8q"cƌa۷jժuӧOӺukLMM9s :uk4v}d}.LYnD~282`Z-7nT!J<*AoΠAh֬6m.o?e˖1|&OavIkƔ/o*CI|z\#$eT:4 [,#"HFVz=]yS#5̴@EeҚc(Ӓ v_7|kH B<333V\.E<.EP2toe < eddopVZE߾}ڿ0,FOϖ3X2uoƴkyѡ@a{m~<KQYc_8U¼JtRo$ PΖ/adzw}Ҏk i|,OEЮv|&n"yL8+Vpq5jt9Bp'[+fޔm=6eӹ:I;b_*5qpU QJ'vQHNΚ<ؼ֭t>q_O\#nN@RѥKbcc9uꔬ%^nn.666lٲEmDZ*|SNUQIG dΝ,_}uFTT{ᥗ^ŧ0g9~8|'[+&vb+ +1#]~6;/F 6.غ`쌍]ѐFnr YwK##!ZC*ո:~jж O8¦s7hϬҳCeJ#?;v+]xƍȑ#\tIRD $ @ INNwބn:v1bccԩ*}1bSY/_F2u]ƵoHc O@!$:qG@lJV8:bjo=V0 3kk̭011졥ZM MAyyR*; T3JKE`anFêtBOzR+@æs7%\3vkZO LJ;|0:ub…L0Arxdٻw<"9rs9.G0XuFZZ;wKcDGGөS'Z-퓵\jv>K4 Ӆ{)5;w94"3ATrshgM͊ԩdO-gUs»3])\Ker,\4p㝎xͫ:%p籾+f̘Cx嗕.G"ʡCh߾gj|J#J @H*ft v܉Ǹ~::u\rݻ!J&]ep6F7tvVϿAV^IY$g呜Z!5;𸥹)vX[lgM;+YЬLDB:dݙ\NN oSaT0^Yu=+X<\8>OrVyZVqvejW*g%ZnW&21t"ӸNhL21ɤcb]iʄWP*KG1e^yԩtIBϿ!%K`` }~At˜I2{һwo ~RRRر#*#GHuo{i z?\M提N$==>pmJ!9+{ e`jbky<hXՑ-jѨj9?q?`8qBm% @kȐ!|lݺ+]%  o߾l2,HLL$88Xty fbu]Sץ\RR(Q$mٶm666'((/o>}d Q\pƍ3gJ([/^Ç.Gq2Tzr)Õ.EC$''ӥKd0j({=Ǝk1DtyYV 8>}0j(rrr.GarPѹsg\]]YzҥQbH҃lf/_ DFF2`u_|a1DV y 2[޽ˬY.Ea* *fժUhZDZ-GTV 㤥ѳgOTի15tEDDCƍ.E<+9 , $$DrDRd  22Ǐ+]%|~Agflڴ`ˋ4 o&lݺ`D^^^J"˜1chݺ5o|c+/ˋUV)]%O?1{l.]JN 6Χ~APPO@ VZX[[+]x &&&:u~IrDRdRfȐ!][[[,Yb1DPPP@xx8 4P3g˖-ٳJ#YW:m6RSS.EEIzF'N$,,m۶hq.\ :/C IDAT("##ϗ%pFjСnݚ)S(](Cd \4`Z-7n,lj?~[n)XK3XlK.%((uls1}t>cZliqDp%LMM_ҥ`bb7|Cpp06lPQFH*޽;V",,>777ڴi.Ob#O8{J/_wa`dddzF͚5U񜼽8p SNgϞXYY)]($N888g033CVcffF~~ Qld)1d7n_|aбONrr2˖- J9st) ϗ{J\/_++jLFQL!|~ QQQ[Π߈d;jԨaqD";5kd„ ̞=dK,aĈ=zV[zN(K$=-[qss389={2p@#FիWWҥ=6mjJ"J9 @[oEڵD(K$DƍǰaׯAψcG-tQ|*U{ǂ dH,+=ػw/vvv AZV(S$'bmmmPrΝQzu%ʖj׮p%B_&O-RQ P֭[1=niXz56l`ժUlFرciҤ &M28lKzR|y&MĢEHIIQQJI*}ڵk111)DPbbb0ao6mڴ1X?gΜ~̠c'22RfJw}H,+&M[o Y'yPL}AIHH7cǎǠcIPT|yƎŋyS$QʨT*.Cw}GXXOFR/N,"ӉK&%;aÆ1c)..L @iݺ5Ǐ_VQ=@_ӦMYd #Gl͎Mjv>h‡ݚӕnpχ 8Ѱ#n&e׭шxJFWU'괫]3Ӣ7yaرc,YKlбvݻٷol| F.Zj? W2T61+W_p#1. 
s <sSS|k2trq5gy\lOMg{7 3O;7r*[ӯ|jjݪcI/qѡCnб 2e ;v4XlΎ*U(]0q;_QvH*4Z-/E^ULn!n.u-YV'T;W@RV.YQk5f?;K -p0sRʂkУq "YW$\×Rޖmo=jU4=Hϟϵkظqcח˗%!ݼy5kgZ(+ Sb }],+RYr(q+)6.&z5vI|]G%q5> "fvsfE{T9]ݙO G+;0x)WRY~*K0wy:7ƿ:k^%C@̞=iӦQNoFA :n]20h ~W @BodMfP~8|Vðu׾!*<dFq$" Ħd`em#XUD[[cnm5VVjAyyR*; nMG}6Y{CP5XѰ3ӅTu}l]ۯ%׫9C[&n|ح k=l(}|0aL>c}W駟|,! E0x`;\B.GJ</dkŇݚ0ONtER4Ē 6.󢎳36NNX>><3FCnZ)MKq,>x ZC-G7tkC7:ԫZ=KsSdP ONL=! 3aV n鉩L Ie:Y={p!OGEE_2}t*Wlб{?srYЦMT¦M1cR@Kg,>p'[+ȶ2K7fHVNH],--)W*-ZQ*VSSlqrɳң 7/aenF5ؼ_ţo[̺֬1LL sw]X_P^ @)))L2#FЮ];7sL*UojDEEQzuKٻ︦C@E[{nnko^[^m[ܨ"*""{="#B$~|9|a>Nn qd@>87|cݺu ~'xA&QGfΜ#55^^^GnGr^9 G=yM:X{Xts@îgO `km?{ <Θ@lJlKH,{xHOֿDetelܸ llھVZoooZh =2bcxX!: RV#ǻ/bۘݯÓ~[P1w 42mu^m!63cpUyHu /vp\oe~q9ӱvN/2+pB1B#ɍ7}v矴)јC Օu!B'Ot *tkxsP(9ptܓ˰r쾚K{;K77ty̝PV[]b?7 '$3nxgE<^U'Cw8;$''cݺu9իٳgkx@ff&`ll: ѠiӦ!66YYYG7јqOg?)?!]8Z#GktXw1b8o=}U}'~kej_^{rKwn&8Q+*rsscٲeY'!!vŠ+`Ci Ә1c`llcǎBtX}}=8H%d{#.֗GΜAoOހLt:~g[/ ljMLm 7 nxg0p>\J/h.LLim!]vҫO>lll'hxW/̙҈ dbbaÆQD:D.@Z~Fr4w}W s`₤Dlj װ1s&RfoBy!<[^bb?"Qj:s n݊0~7oΝ;rJh\vv6ƎH(JQH;xek4obرc~W!Lu @`` p Ct@%EyߛlNǮ㓃qpqӚ*mXzzbKJŎEnߴ#Wdk`@ uR'|wy9_|ooo#1e2~G,Zfffm?5!..2ucb#9 ?0/GpoQ(9,g9v<&?"#cxMCgW.sx{ΥaPrd駟Dc;PZZ7xCc$I "k@s?!Ch옄666c-0x`"* |z0Gckeo۶b_, Q`FE snH.m`B_,M_“LTWW˗cܹ4hƎ[\\;vW^1 iMyy9,,,ޑtyaaa(((@JJ (DrKW˂-m)q:G;5#GChc>2Cn6L#??|FC q iIEE@ ccc\puCr9 S ,bA(=2 6ތ 1na5m Q(ëӒڦK@/;U޴ifϞ +++PD_~xf$"Gz5(4m$srK1OoF *b1:t΀fcŽ+i;BfDrJqO P(P]]Mi2p@ƲAt@H.Wf|+anÇ1LHTe$8v+ 6+on;Gi/yf|g0117o oooh pGi7n @Q^S4 Fn#GA gz⋈k8~?=moP"@}] ,q^@ hh@EEPDC.#11u#Rzp_',s-ѣahf(!(X{zMQX)kj7 uH+&$tɓ'?htk8~8JKK1|PDsssűBtD}}=@juVbӋ %X=~A8Z?mp H@6(<﹄Jv . 
>L2?C&%TO>v(DGPPK+WG㣉fcޠT/`bmi~ b epN/n7\.5-lق$YFǮ1{l֔Qi7*:;/o.F#!H>NR8]ysCZ:N$e Dt򷭦+W… ;v 5553gƏMHk***```ssw&z_~qYG!: u,1 3C!>ؕ[_4?i).9?fx(^.Ⅵ.^dd?9ܹaaaJmLUVVB"'((2 III@PiP*l0/G޶7]Zx4eƉ[zo7"&D`H b|Xl4~DDD`ƌ?6!md066fhի@Pz1rz^ N΂ː4 89Kw\D]9PUG=,oݗ_~ cccL2L6 i\.X,fhH@*HP9,>f %2` dAķ'%o:Ubݺu㏙paٓf#Z%-ӧnܸDQ9~9w2|4?OgWQ iHdDD&&_ dibķ'o4*>xyyaɒ%2>|< "-ݻ7@]zk/ap4mjk`H ޽u^[# NB՚3PBBmۆO>"I ܾ}&L`r|BB=@%(--ENN(DQqۯ"k T7(اO $]P~At"JNmejׇcݙo`PLrJ׏'N,!$00pMIc8Xs4sB39Gf8DFFb2斴& &z*i, IDAT$b#z+B!]IaLhmq>cLIlpuibgN'. Q2Rw̜9i _R=QF$S#-ٳ'n߾:b:{ ?(dD /6.h۶mHNN֊ <<<ޙ"m % ?'lMF FɈ e6x& o}uB_|Yf׷h@bb"Y MB *___$''A@H x75(s=<=,%ǟ nRo7\HGIudA ݻw#99| (Mnݺ^4{ " u PVVQR(P(Tح7\J/@Y Vݻ F0w}72ycz@"ZU)J|駘7oz:N$ٓu BDCH[zrH5\H؞Y"ƖVmg ɹ%,jj65BN&QNZU߿Xr%(MJJJPXXU!-!p-ΐH$T'Hu W`t^0q(b1%fGp;R)8êU0m4mIK{0$<@;w[sZ5ȑp.<;Ps#NvBr~ +eu}ZS?~ׯ_: Ͻ{```777Qi #Gy"*T0O)BZ'?1:<^ OG @LZ>T]@WƄ зo_QxݻTJ N!p=h&8*Tqz:/ "#cFɈ0:" Kdej?%.d5"bQ̤3P__X:"T^I%Jnk?Z#ECfv0bZ7jfd]V^#F@hh((Ņ Q__:r~~~hhhhǑFTFV  ņYS{ECb1m;.6](UǼGD71#$ ***X Zp]IH5Y𰵀aO*TƖa2K66~9e5(ԁy_",, cƌa削agg:!"HPYY:rJHIIah*T[gk^[B&6-<>c\@URnH](99;2F`kK`aa ˋ @I-v^L א5˫PUpL"Ҩu`Z}ԩSYhUii)ll[,,,q6yyy=@*Ts6^[zq,$-< -,q@zÿnkb{f͛n:0DA&Ԕ/ дʬŃo"+++aeeDOOO\xu eS{PPYv*^_ystrL]adz(MVB0<֯_kkk,XU6d2p&? 
2'OM\]]j*⋼Ƕ7xyyh^{ rrrԙ^{7_nCH~) ʇ.+ n<=#Yp|nVs62P#4?+҉8 x";u& 9\ddZ3|e^޽{9W^p7nٳXhlOOOoRR:th}6ok N8TT[s4Ֆ^zq~Ji/M&/_֮],իWhzz˗/s8KKKstt*{[y:zhoͅ_cӧO?q;g>ϪXf qCq\ofpX3p ܿ3 ys.o|ٮ~ƃvo'+l&>&=>{[<j7z:a\~EMG}\PPP=h~aԨQ&%פy_>7h#PcC݌דHC:~C{n曚:d5P^UUPTT?FgggsSsYYVX={6u7g_H ڿOM՟%,^xu<U1ZkJS]uuuP(׿>O5(..nɖYMS{kM"'ye$*JPuJc5<8O7&8+n8yyyؼy3v ޽'foKg_iɸw^γ4yUhP(DXck㸽#[\[/Ld<,Aٿ~Bj}BS*axA7v{ʴF B 6_OD"hܸqSNQӦM9sٶhy0~mh{߫ SSSDDD`ҥXhQEbUɦy}}=jjj`kk۩yH*dk-&&P^4|7XfM^UָxDɴ<[[g(2hhp .FNj4(*pӗNrcxp*4Vu-&+bdujRh֨ud$!ZR9.%%ET>>>իzTʊsvvN:UVVrϟ <8ɉ۹s'WTTUTTp<<<3gs?}\ii)W\\tqkkD"l*TS9BCC9ܹsm۶q&Mzj˟ɉD"NPPk5YZZp۰aUVVrG|||'Ooonhh༼8\``QIC[lmmuֵO[t<כo 68zom79.gD);: Bs<YsnoU$}NqFn>Ή\sUyw3r|7i7`*'Xqb{gw)_T%}:Z\藗^z 8|0(Zo[l*l: q΃y 6 )iǏGClyydSEƻ/l_:8~!?N{N]A \\\0i$uJ-:8|0ܨbNNNH{H=@uFhP*QV>0SCD"4d ]`kοOJ[3cF6@uuuظq#/^4UqttT DN ŋ(--g}+VDZ!H`nnNi*ȣjU0,͌ SQ`c/vEsڳg˱xbuF\]]) A&M”)SX!mJTvqwwG^^d5@p<5 *.A99:#K2ׯǔ)S"OOOTWW#//OOs*lҵ8deeۛu@le #Ep^o; ջ|Ze5hhhמ^T 7_U(99ΝAiii~...RupwwH@Pp1GF1N+\/@ŕngR[i&xxx4-\]]addD)^^^t͒v233YG!Z t^15&d&dp2mjkP  u]j) n݊C87200;}$:ÃYnTTw ym]mP__ F-) "S *8xSj)N8\,X@OqHHH`vDUU XG!:͍LT /ƣ8@ ]0S%%jk*=Rm޼aaa]Zܼyu BݻFgh\X:tAy%j#Am)@}JlkK.B/٩:U-++þ}/vS3ֲBHB$!==u'zTd/v>u\]]:aQbz>Jv@/zl9e}=3avp pj)qعs'f͚Oh1i>}PDڅ"rz {@= i8gfS(1+ȓ~8!c{@ `L?tJwhСʉΠ@W###Itt+YAP(Vi#ĸ-R0'؋a2)Ю]퍠x:5x` ))uBڥO>sjjjXG!Z999ccf{h}TMmfZ<e217}W\:̍O [ o3gɓ'YG!] BDQsrrBQQYG!P1SzwGk6kz,- EO;0 l~Z:&vͦm:\]z]~D"СCa IDAT.033apMR!?fWTuc{ ɼxCl`FɈ)ONƼ03z8[jaSrED=:\ڵ  *KJr}CtYcRo7FK ;v`ƌE'Nbv8p ._:rT*:nb`78Y6zo|oT<+Q2- _OGy96+s 4Cеkא١8r(ˀ*Q333D"H5gllƞ%",$CpgsEQ kRfzCa /2J:T&9)*bVW kuL^l@ɓ!гpdee!!!uB'''I*$z le9^D@7; /6]8Œ Ew٤  ~n>+ |l t |!<<3脐8::"""uB%$$"mrrr!pz ()Adr6)(UӪw^eankO% 駧.= X1ctf```}@Dg 6 gϞaUR =& ;`3>?t>j $] %)('!{e }OO]9rÆ Yg&L@LL JKKYG!MÆ CQQXG!Zё =VWWXw3":%x ByN63#sl ~qǬ OFS@ 8~8&Nytɓ!sNQiSPP$ XG!Zz[]]tw{ _D{{y:bF'.]P0JG4=e`PY8x>؏a:TPcχ>@xgm6QiH$BXXYG!ZL*zSTuUSCp>53x_M돺*JdhP"/0יݗ0{3Jߞ:rϛ7gϞEvvv;X}@CoTuw{e;b `wF8i.+uWVsx{edR6~|vh)OUEDDuO'B"`׮]ҦC";;)))-H=6Uu$O'hhrW$+-EիX=5+ή?Anv 7 ,ܸqC/~1N۷BH KKK=zu  HOd28Z*nj7`sQz? 
%Tq>=X:*黓 -!) SNÇWG3w\\x*H#G㬣-% aggG `B_Za~o_g,YQѨ+`tk +,ţ >SO]!p1g\9s :蜱cƆzN;v,Μ9\: RRcRC~z~(ݺ-}9?|$ T U|;;J ;gmt Äxٳ6l:$X3gRDt¸qPYYQ@5GB}U4.*@k R:s{ax֟،B{v0o1T†JPff&icΝk׮֭[*ooot'Odh)*@21‚M|'7W4=aB8ý'`#chnYxgEAB&*@022BhhÇC*R/ &LY ZJ*,pz 37pN.?umxn~ ]*ssmHX67_"fO&1LHR1B/26l؀qi44G=@fӌ>h_,Ⳋy6 ?; 2QBҴ4]f6pedTaK# 2xgt&Ξ=KZh"ȑ#Ҫ#G `h!T(w&] iSB0G)?CQPS&NDcP֫AFd$ۻٔכcS7qd2AŸyXl_<}2JIڣ]PBB<:w ֛>}:D": 2vvvT롺:,A!$?]AEF:2"#iv8 q=p|i8M_:|kcǒ1(%iv@W\՝KXp!vލw& L0%,pz%`v }row[lGF1噙H=tCmqg`c=Y>9xk~ل$*iW}B 3ܹs!BHfϞ3gЇ]cggGׄ=s#1" 1}sw yG9#)0FʁL&&"q,#K'˜S xsj1ŸQJv@4`]JKɓallm۶B@ `mjw _Cl~^Nֈp/33%ڔ 2ϞEV|5?6.c{.]/#fT[[D*TtRرc"sss {3JK:&@xzSLAݱ~zQiK/[uP(PZZΤKߌb}+$*e}]lb:^⇌ӧv$u랒޹]:c<ރz|0;0JK:CЍ7з/4B!^}Ulڴ FD?jKFaO'PR_D,W aǑz J cj&pl>l|b3 qr3X91t7Hjx>@׿)))8y$(hذaݻ7ݳF`ddDB b(2?fą"x܆ IvۓDq(jlGB¤$پW.AHY5oЀ_U0gI0F:g1יQr.m@pttT.Ç){u-_ Nz:1^?@nyMF:#潩8tS"i^<`h\P+CnU$rbc~RW9a4D/v#v(X2HwXTW7{Ek,%V5ĞhM6鮩vK,1Qc"vA4A3C/-z۫;ǂC:GVL,^c螯:GUXH~RWPZPHSw;1)wsDUg;s:Y<Ľ Ro^_Y5Lƛooٳqqq: jƌ,XUV1aqttM3c9l.I"AX jW:4:oFtdCl 0a单sQ~+´k&S)2SO8y]^qh4NLo\vaF@SGk2R9C\RYYL:9sHG4a;ƥK9z(ǎ:P6mĐ!CPT5ElZ.y[ѻi}nb%IfmL GnWLݱpq4CdeQutJ4cH+/mO`M [z9+yW(oիu>|wu>W^EVpSNwܼs2OPFvbjk Fff̨(* [nB{Ңb䆴r`"B=ixߵ:J<%hx[mh,000߿>4zӦM/_~aԩRZ5i҄HHO5@E4na.{k 8&kk0?lN37Z41bhk_ IOԵ<Υ^r{s+K -)L(041AP`PGJJJRTEŔd2|eOL[orUj5Ϥ(: ۘɠvL B!ַ: EKɉ1cW_1e  Hwޡk׮ݗ2**ʔam|.ts{t̢WONn݈k׮RZmۖҷo:q X'T*V^9cG?);Q` v\Hgّ~LF+O{:;Î>YPGlZ.fqCmy[S"{;Jx@,ļ''<<;W_HisΥUVl۶JG'd2"1N-]#F`jjz瘕11Ws8őD%`KTT1coI;Zak[3v M1hR)VVRY&Tŭ2՝',jNWor%M,LhiϤ`KHB:cƌҥKHGjJdd$g&""B 'r9֢O\pEsNV8Y1cpUȅ|.gr- UCg10G SJ),ZHYurC,Lp0DLaMD?΂ ċy75k#F>DhD?DJ%k֬w}65egc <ePԩ}ӧOɓR:͜9###>D@C@g1~xB[ڇ6k֬YӇA"uA5 K.e͚5R(R]۷/RGzWkTPP8~z5kO?MvvԑVcɼkdddHGxD?~IOOgϞ=L0A( Z BQ3ccc~7 JIj7)..:HHX{{{"##"***ʪ=;;;6oLJJ cǎH “fffƶmgĈTTT"Ah4,]ѣGcll| #@:&((;v~{9Q :ݝ۷CDDEEERGQQǤI"#@i׮ <J%u$Ay @@C@_BBb PGT*133,=iӆCJϞ= ,--={6)))L>%Kرc9pFF&H[r%G( Z jdZO jҤ εkפ$ua֬YO?DϞ=gΜ9bP ɰ]j~ƌ#: Z-QDGGcbbB׮]|ԑ ƍȑ#3e/_/ڵcѢEKS H?m޽dff2~xNsаٙݻwcooOϞ=9ԑxxx{뱳W^!00? # F@\uu5բ v#|r : 1@999~ڵ+: <0cccFݻ^cժUx{{ɱcǤ(?1]T5NlܸQ4?" 
Ɔ{I>}Xxԑ᡹{qeV^Mzz:]taРA={VxzO@(u֡P(9rQAg)p 1+V`֬YL2ٳgKI\.gԨQ9s[M6m9r$)))R[jJ% jŊ 2sss#@gfѢE̝;I&QUU%u,Ax$DDDp nJbb"͚5c٢YDe]{Z`kz-pъ'+m]&F?2ƍ'uA9@fffw1 ٳ 3oQXNVXj{OnhP} */U_$TʼnVJ^rM\lhfGi㈙qꭉ'ҳgO{9tW_}ū*uXYlNvbv!Uj5N;ZakNCyj/qi$,BYY 1v4w= qEfP/BPtR QAfenn.֏4ݻӧO-[ѣs4ck|)++1W u=?@37[lp1{2I7psz=b%F2x9Bt pX.F|||8|0~)o&v^h:츐N,]ϣZڌv lE!8Yaebl2 J]t؊IG)̘~.v#"ԓWg Gtk|\BTT۶m: : 18f̙ /@bbhi?{2x&ҹ^PnẢlGgx9 f f y;Qxbv!f}N=41WSwap+olАٳgӭ[7yZjڵkҥ$[3)Ɓ˙UTlM W՜xւMRvγɶXf~{K"B=ڗAnP}#@ϲeo߾RGT81x&͍iӦO?Zaϥ묍iK]7ē>:lM5 ]u1WG3e!z7`D[?\SzA\\cƌ{̚5zɲ1ɬMLrC60Lggےj5ǒu1%-Mڗ芘)dqdz{PG&O7#Fڵklذ+'M2 Xz$GH+#3fD[?-~j嗐[LNq9y䕨PUUSZ}f eؙ+77o_K|,x_k=SX^T֝JfC(;09 =gjٱc~-3f 66V/KfqtbPk4oɪ=lHqbRo?l痪֨)(Yc sc#L > \uFP egs%1ɬIbxy1 ;h)= * CCCrٶmRGehj۷/^^^,^XLvQ6mUHO<>u )+!4s{I78}-3鹜dno2 SF& &PWxj* ;-M v=-=ieOO).gّ,N 9N~μׯ%-ryTTGF&5)qj N'8Τņ)M)聧DVV9w\b员]H7@ d u hP+UTUWP-miv[/LJ!6-їXs2jȌ>-ijfڴib{#IUU;v:>vMllQ$Uk4tPLLL_$ԃWIII 7oM6RGzl]va v<>߈5Φqr&QW8M~q92mm[abg +K䦦"TEŨJPPyXN;_'zJX &FޛKѰRg B\myoK*^2\Ǝ˞={͔*5ˎ&s3o>J N+ Еř\*P(bdc VwoѣT**|+ u҂Bj5tw&|q$ j-^xJJJشizRRR˜1cرc-bرRG[Ng-ȕwzd@=_KTl>%昺`삅 ˳lJPfgQ \N 4dd[vX%s"1^i4;S`Uլ<~9[c.*gL@nAGARYĎMʦZ&.`쌱O}Tj5eyfeQEYVʲr^ g_ǙGLM6|>3nܸ!uo?w}Gzz:ƏxZ )Sݻ$#FÜ9sOxט7o^<18Tv=Z}{d+q&Ue5ΎXxzc兩$WRx-kgfR]]M 7u dX_,Mnf[ħNUBlrXGxcz۷^xsss֯_ߠF;7I働'I+Ű&*S>&_DrrKO,G, ޤZ׮Qt3 cFeSq!˨{HꯠkL ~GRSS" j Ɨ_~)uAGZ 3gsNN>-E&Ak׮eĉу5k4|4@ w{>ځ"Id,*K_l||12{<)J SLѵt0ߥ&10ԛGt$ɺK+++GK/I_G9x;uk4p01G=6!^Xb̰a,l(.Vj %))fgag w öO`Ionߖ۷G\5%waX[VXd֦Su>N|;sf+|9ظa?_du5h T%\eee iZEZ.1{k cՙgZo?d˖-IEF׉:Dt[ڵk3f *Mׯ3x` :b^^L§ccV<1{ibSrvsñe =bMV0ד縢sOH„[3s0F5HU+5^ kʼfPNNGѣ|L8Q,0kN]v|;3N5(+Ypn?KIEv!p m~gfrYnedƿiOgZ?bSVFuᅧ9ms1rЀ͛7O>LLuisDt[P||VLQTR%naQt^kt2ixwA@ Q#s3u'x3ίߙ5j|-z'⵵GA_={CCCڷo_cB"""xWJ/?aO㵈Oyel8N7qUeHaCE?Օ!Cpԅ/tv]̨A-h8}'*AZݦR0VtRZn-Ax@899UyԺuk? 
۷'**gѣcؙ+>sX{h/]`4TGFWqTˋC3`}h4= 8q"ɕB:}I{ o HR>]+~bj'6M;w ͘1&=VNnCC뼲ik0IaXK[Z?O1ss ݿVcъ!<Е._lf 6 /p!.]ļy󨮾;~#G<=8xmex?͈њJN!>̗{/5޽dE[;-|*tm83\q6s'1ѿi,Dp-]Cܞf,|}}INN,B```{DzeXp!ӦM/@/UUU,Ycǎ=}Ok6#IY>N{`=Pgj\-&!?A3{{љW32 _2{^H*++I&դg^3YO2|6tjN1ݘx?hpT*M8x1YŴO:eg񵷢[ȓ(1 [nRG(00Chrrr>}TWWs~`b&g76MSc#%NdN.>xA| mxPiC؆/udc{Y֏W^y4Luu5s%11ϼ n?1֣9ee5]qxvr{DӰVcf@А!m~?u̘]oDnGovSR* \T*Yr%'NuZ{  )) CEE "77n Μ9mir<9w1MB~PL^SC11mߟU1nυ1kF;ڵkbҤI}%cLD\Nq9cf%P; phي0{ 9^OG_g%Nyy(?"@=ipB gĉVYY;S빘ެSk]Zaښ#u pBz?b'3 5;ʴ׹-c1}1A1c ||||1ȑ#,]Tva3܋(,#$R? sg's\۶ŽcG>v6h612d>ts7[nMRSEQAR(xzzHBL&C.*֭[Ǿ}',"]tsbnj?GH"~}0st$`3\S,Vj9Άq&4\͛7gܹpE{;#5 oxv IDAT,?z7?#;3go-\0 ``LlmpmoϞkYz7#Ck_z'+|̂231԰\t}1m4BUg)p裏(,,dӦM<( 1e L&cرw6+U|,JioUj5-Ϛdӣalid$%Utj YE/؆iݛKn0{l9vӦM[ 믳RSVE3'О֖[L/?` rmdl}Ȕ_ ^폙vRҏ}PòdݻQg V HVZEVV˖-w5Fj5999|g7h40q!6Kï_?,Ű _=?#;L+~tؑoL2e h46mD{_R&|4ֵetfE2~"ߴK^}Xv2oonog`ϛ).']TTkI#hPTb(++cL4I4?'܏ cǎeǎ$$$0k,|}oOQ(TUU7kS/ kcMEFffGFrLMׯj 20cdk4Mhd2aaa|deeǶXM+FtlVQ1"{ ko/|z`Isn6f67'RrqRF#@ NyyG@w/ 2{l9~8SLΎJ.ڗ5l;W g u۷) _߈+9/*-?H- 550?~J-bɭ>0Cl;SFX26o_`ERy{j.\3^Q6EkHL}-"88ݻKE{@Z^Zyc_&tk0l^qo^C£S'>3-Ҵν+ތ[~ƽ(ɫ1K^x*PGuS:a8:1mnd(c틽UqI5 \ɓ'ܷjݺ(Gl;5/>eUD=zeޠ85kC`]Ɔu`⊃T5Z+3Eܑ,^ZuV.2՛?Zfcʉ=Xz$WgT10[JŸq㤎"@.\q+<^yżIӂN~Z\%xxY-{ ϣsgV֌\ee6f,z!MgSY,a'gQt<ǒrX26oAY-= JPxT =&Z.l՞ٯ'4ᵵGjlܐ& … >|8RGF #HL0̉lu|s\*D鄿Kf(ǻwooh!ܷ/5uGT̂2$mSNZ禮9j{<} :N3D_:͈NəLk ]Oɓ } +_a 8V c)$'Hhxv}ktjxGEŕQxؚ@ǯT=z / cۿF,ާDŽ|C%Ftŋi޼9aaaRGF!b@ +x֔>Z^Y} nO=%Q:qϏVEkmjib;D%gKz;/Z2^]{%L(,bͬ yzluw}O_[N|g7n9[c%Jx(JAG:u8@*BBBpttСCO:@HaTC}}eH.: W($L(ݮ/v~ۘ%{tbH,\vѪU+BP;uڕ'Eh`Y̴ _\z.◷rؘhֳe&QU*x[S_=c-[/>Ck͏CX~~A -_^' p222HMM}Bqc2cS{{ R304ľys=pQkO5{4nh"[=fϚMPUUTD-+WDP0zhBPPQ(bZaK3oqr;sh҄*5+'jܵ){3%bnvdr9v%tmHN&F& o#@CѰ`Fh/OC@t֍[>wqQU? 
;&n%FdZjKY|>̬?Dx..# rP"GALiUP:};&G^fE,`nn^N](d(U1"m߾#GĠAB#@@(҉IP!9;7(trFR0,\̆ 0 ͮM"VNd"L łN;E%Im+A&K_9s&d2K)A?!YP c3":KGGTK(n\>|!CSTn?˪VkaH mqއ= V+1e#@2={ >,Qzd\^V 6 mr嵰t 6}n1DO{1ZVzi mc_.͆8h#K{GTҖVKkJ2 T*ڵ K,9_ P7 ɓOt |l~`ZhZrHO}mS̷9"ʆpv`qItɔ[њZ5O*d8 b D3f̀Rɓ'q8 vOXmmzA4Fq36M(W(۹B|үvog ATJF lF-ra7U.6<0o3?*Si> K+}κ2dX;wD@@q[4k,XYYa߾}:$EPEr8"&V[P2DY-}U!6lT|ZVİ3cL1]-C"#@SSScʕ 7fLݛ^u̞={kF:s(TI'9X&|l#tK\ TH'}K(1쉢P8Zj dJ&ܷ; iB;uیL&##@~PE1 q.]tjdJh$"fEMZX@TC49XfP)Q4KD;Ks@B!'ACއy*aR)2c̝;vv kAa *%%O>$|>0}t7{@+WQZZl1U2^lr ܹsy&[#=Ǡ(â)Juݜs.^y r? xV~!ܽpz>Ue-J!ǿizr>эsMpl IN 5EF`q,,/~} VBv/ y6=x;Aqк?O{^tikX5NO ZC{9jNVj2ٜ߿Ua?i <\ ːpm`;rR4REn:<} )XXCMP IDATߙ! ߜ m[xicm.p_L|=( އyܺ~Pk`e?,`۷oGxx8 伆 f͛FCН?Duu5>s?666hvò#//}"##1zhlٲ999ؼysϩctƱTRJx1۲ ۊaf4U(Æ|ʒ<lGDmhxEqJ7y  N_FUs:+@l#˶vy;t1r,Ui>pLCa]GX<Z\{gg秗ѵwtX|r]?@H7*d(c[N{=Yg|>4@pai,  p1Fy A4M#MfΞ= ?~<}ԨQM(Z{DDq5q߈Sl-`P}嫉n:U۬;@UQBz{g35޵#=CG{kcXK6 Flf=PgCUȳSi:`?fsZ 2ցCa3t<4.<o`=Yg|>4fCPUoIB۵k1c FD:e<~ҥDrr2#CU3_O ݛ=++1:XMyxaG^ŵ{U mʺ|O4OQt>Mc3s'=9:QfiVhP[tuօiD( T_<Zi3_V_sA߿W.e~~ݯ=[Ӟ׬3>2Թ ~0k ;D:?~<~tvvFYY***hlMEEECSc=9Mu'/\;_У .Tc|@݉ ꊆ35?ϣVOKǒ +ߴ}Cp[gq9Lqv|43߿췞C퍳|=\f #4yM+B+Ji.f;"V!MO`ma`Hh{=>Z>\?S?dL"믿Buu5V\Xr^C\őv[E:1eʔfG/ ST033c::+UVA*л?x뭷pbbƍM>V^ÇC(B$ĉXp!>VǨRǏ#,,LyRB^^ߺ T Qm{[/>/~^/?UC]#F*B͕]/w,|CplQ[_V*89%~uq[yڶ~u}Q kӱJZ FG*3<Pg?mFT um%q>Q82hb^;VmZ~O[cL<@Q J]:iO535e^_|伆x)Չ,YBS3F޽KEEEQϧ"## @ÿUVVR֭zMQԔ)Sk׮;z x*$$FAwXmU)SX:Z@kц'Xe7z2űؖ֔]X5TjHE7B%:wsMf+e15~Zk@QRvaQҚb[)ԀC):Σ>/ʆ=Of}lנm1$PT_)KmaEYަcy|7(sAR'fwvi+?Jkt/L|B)>)ߍ4{"( efNq(Tv-Izoۛ}#7&o֟eR)րܨ[2KZp!t y Ab jܸqL`:ow"88N¤IZa( ރŜ}ڟyGWսXM+Q#9\G)c:QHk~M i,Gr1c(/qz|}oe!L_iOk]8ml8ye_g0YW_ltFuUĸqu|XB1}CKb( LZ()`:HFM-WO;}ˉ3h'>TJ +ssrf!L%W-_Ys|żyB:r9,,[/KvZw^g?щ<%‡wg0 Z(XҮ"e2=IG+)Gk9X@-4 aT2n$V_JJ"\.Ν;|rR\FvvvL`\,ɓ'۶möm:Nng* ;1m8;$!8 ?Ou߮:D"uTaaְ' u_)A`%V{; E,2h´Q- z?Է tf(Ud_t?P]]իW3 T[[ ///c0 65k'|{G ` rSw) Z3a`3lBM[i"{r+N9lM`Ń9*} HDR"t &FdHn݊3g25AD$֖3lK,޽{ tD'v#O(9Woʉv׊N\>N6ZT?D |2n:DD"ll{kL R`ot|o 9N|W}`@6rؐWW10%* ǹB9IvwF-"HV] [KjRNQd G8ٺu+F!C0 U^^gggc0 \8zУP54uLX`2”*A^Nŕkp 
)p*Rm#"ӑ֖RRׯ-!S+;;ǎڵkB͒娮+QgfNz$a\H?QE)UVƒRVXj)<[xd pDZI5jmCx\.dom J[N##@}vxxx`ƌLG!f   X{g;wɓ|ZB,͸sÝ" a#dh#:/NM=Ӆx9B^iuC͂O2I EQVVa;o)@wG G ݻj*pAtXYY2p)SO>1zۅBZH_WHb(Dݗ\X 'k 3s"bs.E=+ed"LRR&E ? V˗3 ZTZZ7@.wō7pyC?5F"6 `ֻ\6DhJ&anRP*FwfGBTQZP2THJ`cÀŎ\AB^9}%RfffdiZ|?>Z/xPnn.zAСC1qD|駆~jqEXN[Ҍ LF@'W3Ku"C dj!`(a*$Mu_Z>HRr'96 nV87d[`=غu+"##thUnn.a)ƌQFaƍL<=Af6x;j&}ۓA^VA)"͓cjP/Z[tFI4&u ײ /H JEie)zگd7n@&99ϟ'K_M?F xwpI$&&2耨^8y'v=ainBrH4MQS qM-& {~=ac:vVt;I^-/Z&g(aj <748VZx$d2;&Mb: AIVVzt XPlڴDD Bi 0"Rͨ)ȇ5#z7Nuh)N-ZxBRNRId@T 2߿k֬9' `:Q`_GAzz:1vt%NO EM~> CcVg_J-}8,jr F-HZڏ$AƱOJx78Zt}P]PH{BGUv6l,y:S;$brq_h,Jamm1S*رc.]JFggg﵉hpgСCqQvq759 &#Quv6ϣYڧGghϔ`op,20Z9٘ 3NҸ\+j1{bRuÇQ^^_~(fiii`:`SbĈB~acnڗG4{?/x\NCsB^9r"<;ķ޳C|W3 A>wd cI8}?rnA"3$}ט>}:<<#.-KL̆NSܿ0*5eeUU106t?6-Eᯛ9xzsH"Ebb"Y0)* 2dQQ@0az-PL%a[  ʌH+tj ƤcqXm$s.GaWtiG #Ѣ6;KF" EX6_6d \lݺÆ CXXQRSSP(H(`ƍɓ'Bh?7t+iA/<.2923z }řcP-S e!K8lVFCU=hUjƠ23j0_73P&&'11dM4tPL:Z-q6X82!{0荊۷LF;bI8$q_}0%<?ҧr4nZ-*10 ۷1wx_ZJ8v;[x ߎ;{(. i݄@' %%Q6X82g_}| jJ )/g(4F4K3nC{ň31҆6+苪2U),}f_>?VX333DDGG#""FŨ bΜ9X~=T*qVZbH?|y&vĄ~rFYML*OMTq< 6>؇`m4xz.}D$vW70'y95U|sV g'd \߿r/"Q]B!tbT|ž}BkU^ P7>^~c+: bE1vX0e4dnwSZA>>{\&e 7dRT*6jزe Ν ggZ ZE ]Էo_̟?6lL&c:ъW{L灯ޡG zQn"5,< Ks0k$^R}މX̌fItUIIxx7k) [pjC"j .Xz5QbbbЯ_?8880Ũ]ԭWSSM61hAtF Τ6XBQ_Qq1CR+(} /W[ˆr_M!0)BX56q?;,Ab""\wQr>Jk?{h,/[_1qD 4(ngΜĉatrqq[o͛7#,lFɁx=j& Bql (LJak{Owr>V\}qcN`H*|Jk4}X*%JoJ0o?hhWkD" ?;X,@F֭[8w^{5D ܾ}@M0^{5xxxwe: L~ ׳8VHkVWCH6޻z6#ȱR*^}<| ,_MX T$߁J"mDW H BO9 މP!Sn˖- IBvycرLG1:F[>o8ס6鈥PB`BSQ@qL \>!ΘjC6сriQ01_],T䠪焁mAJ%њ`ŽH$`X2U k.Z <q5cLG1JF_ <o̵7v=, Z>xr7/CC6R^9"|\s._?;3usnvJ%ohф)Rd(Eaσvۚ1p 9S"lџ0f޽PTXjQC=ߟ(F$>>3TUUa֭LG!Zagiχ+ie`ߢ1(ztUܻʜ^>v e29UcǕ39f_!7=1(KIAUN6 aws1Q!S"jlٲ ,#qC9S2hD7|}J*ވ腗2 IDAT@j̷yPPr:ތ Ax7mou6LP̑*1B|(ll異.KvJk^Ǽ~D"! ȑ#ǫtTddd&Qu`gg?(Dx~Jjx/i!>xatF_tģj?{Ax*}i࿓rz y3G0MV0|{..7>{-9Ȼx{e¤(v bxom~ W3G2N?$ j͛1e1 :[s7e256n܈ݻw͛L!Zd `Gmp tDQ*9a ._O)ʼnU04~ Xe, B}LyV !1o%v{+s\= E׮3(Dӧ1%ޚ4vۏ1GB~]2=l,Jdy111'&(8pfįL&S1n8 h4L!Z_<rQ㉢K\@Ae,qnR)w! 
Ͷq)-Ep[`s ykx@ =;挂 .*2JGtբ8Zg~|8Ft萄dggcΜ9LG1j&U;;v0h]s#3cc >Px;qxCq|| G IM~>]S jnbt<{h ZŌ]g p~SZK^%ά} ɅmM;Ksz 8sA7  Px N i/SN!j~-Ad2|7Xj홎Crƒ"Lo_~%Q60q 67]g!Q4nb?-}?p$" ^}va8uD/šþ{ܿ=,#IpL.{ vrNJFFVܳg,<_?OD;?/ v$S~'D"avڅ@3(& ={>~lmdmD!W(gP7.gaے;9gN*'ݛYǏ W^B[>>q/ץ0וeO7H)|l85>~JDE=]GӦm*<J5~|^\͐LkV_bpu%eiH$_l2 ._F@@V^t"H.bʶ:Ec|t??Dtqi)2ŀָdM7w H/Ji`Qx|Jˁ7Ӓ̣G! J}$Dh`|H'QR#ESnoLF^^YT0iB b2Lrغu+N>'N0h#W;YR,w jml ;3G 61fQ̓'L>Vio;/x~X_RUcޓ1=ߞN8v5y ^ rΝMp,6L .Wi0H/OˡEl޼O?4BgN$bnG-w45+Qř[xx8s(9fDtTXgd‰Ց:MTCg> %5rŋ;v,qLK.1 :ٳgbRL/&OSNaҤIL!NQ%&m8z\l,iUK0uY-pӇ]T(DP+ǍvZcSl:~Z8sioR ^>7"Cq0N-=kRIyZ]辮s8Yӯe˯NAO׹ltڒl 2dqs- <'NSO=tX… 7nqLJ*`ܹ;w`cctJj?Jq( e4x~)=Y\Q!+~[:Ng)j3vAra%DV{{gR 1sYrfj0g){XJj Q M}5O8gUεn]Yt,XIIIHNN .uNtӦMP(իWbr\$ Yfoe:NURqw+ 4S>`ːp5 $5]JW@TZO:{d@(V'I.|T1Y ]pʼn:4Z|x<N߂7|$MCQ,.1:l@]9 bWDӁKO8J? zLV)j\G u+r9JǢ<#KG×3FҜ~oWgʄ l1v{]O8)S@*ҲtA֭ Yz瑞7dӧOGjj*nݺm?MK)x5<9.Gki*ofco1p E`z%.)AWUiX;aδ †7M,針Ϗ(_Dry0:!Xxhk'' MX(i(O<4?c{ܯZy{/FGt%w,ZJ( Ɔ ꫯ2 : įٳg3$u ˱i&t%sxG( u% p9Lǡ";SB|qҽ6.W(¼/"1_UcJo6X&wǾEcff|n928틞ÆܺQyy݀R,ƻQFdHyXe[1˙{g 0O?Ŗ-[G'L֊+pY^x@{nZ ׮]Chhh P"ҟq<9G)M)\å"{zmPXpa 1 Jo&Bx>m}vz5y_od1pe>t- R#0-G>š\,S@ m|JQ2Lok+Tx |#}(XN_|{dgg3 |}}1|(a߿ǒ%KctDQ&M2ܼFS_ˑ= "tlw6<#(c;wP~=p`, GwZ`A+~S)xaTlyndD竑)8Y}0eUNņoAP)0=ywj Q~6Ϟm3࿔ jdJ|7;wuQ ~ڵk=uJ)2e JJJ6L.]urHH֯_Ov{r*DXK+ª0u(쭚.lO$cÿ7#K8&S'.)E;ͅ5ތ ꎔiv^N;F`|?ݩaOc Uճ#`εA QR <{9|}= i5TefBxDJDcS5_"8|\gwuV֮]$DGG3ŠT*틩Sc:Atȹs0qDt6oތ b:g>=Ktrw%NƩyYZ!7Z TBTQNx{R mr3JXVᵉx`iFF}H»c qtaPkz6IFVY5l]z86ǴP֢<-R*0}PoDHZŶ)D"<3|7;O1lhxbaf֦-VBM~jrQ_. =K-J o|09w*ʡX\J/0<^K;JˁM/o􆭗'(p"1rs AMY<m|? hv)vNG].IT̘18pQ Fb>|8t_/Frr2t- (,,Dpp0.]͛73Уe57pv.F퉏6n=ٴBHIUuk/oK'Z@)&?|h1'zxawvE-6_g"g@d沄q:!GXe'/STÉ9=! 
p`^`׫̌ T&?|ԖWog9C`{S띺[WC`aD'vb:9r3gDJJ th7TDEE~`:Nm 矱d?̝rxM+D_OzxTkq&ǒpN>ʪ%[ n໹,^hB\ZIi)R`3Gd{c o8XۅB|y&$d >B}&H/V3щDȰ`^W'p$)ǒq^j ak77]]a7\@iV !)-L" ´Aள?Ճ4Z '`H+`l GnለV_6 ^^^믿BO?Eff&\\֭ x۷o֖8D'|KBiW'a}ak}ZB!Υ!:WP)a\;GX8:gk<[p;FZZȫ J\ 1}0ܚp&[BtB0wxO* ӠTk7< &+0ֿnKp~1.gfnj5x<,`f {{4ov SɤPԊ*Hkjbg}]1"=ѿg˅ʕ}|s.r"L ;Q1ԧ-9s>`:A={СCC& +WO>a:N aؽ{7qNt@OX?+"1ݡ* $Tf~%TZ׆ffꔘW]WRq . JsȐ6MD<==W^a:A?gϞe: Atŋq9Fw3c]ǏǴip1L<8D'+)6i%Y}vSiȯ#BZ*rTJP5+TPi wfњ'kl6`F۹8x#saqX=t">@\d*5z!}0%ػM#Uȩ!W(Ӓ])Q@CiQ-U6לˆ,8σz;C#J YSTA^NX3{nOo1~xDiy%,TЧ~,KAAԕI=Tڤ#44'NDLL tuuPիѣG4oߢ{tq%. }}}ݻ.MB  8jjjz*}Hr S$f}^Q̂RdcWR޿ZII1bQ^ JP⧆_ku]@XZZ"22v& V^û|$"1I9xs_rpyLE(D9UH@YVj .h*IC[I%)ǃ&yzÇGrr2Zȋ/ƭ[aaa NJ+^ztUlȐ!Xv-֯_vBW^ׯN8vBN>8@>}rrrliiiiƎ;`ccvB$99}… jb6п&z www8;;#,,840qz84* $&&޽{lG{p8z/]vANN ,`; ! r1ghjj҆̀ gmm ####;;84ȼyеkW8::FճgOlG{n#iؿ? )m. a+"""pqoSkZootAp\,](40[nFeff3gΠ(|-??lh2w4u"|Çزe Zu i߾== ۷84ȤI0f]&CII َZsoooXF_χ)&L@֚@1vXlݺ+V9︹ɓ' b; !}?~|`;!6h ̞=k׮Eyyן@077ǵkwwh#@?~޽{|VHuI۷g;@ǼyrَDHmݺ8|0Qi4FFFَ·Ze~p8\([jj*lll`eeCCC* ##i mmm,X[lA~~>qi5k؎·vFUXXݻwںmDZ2̘1JJJ`;@  c8}4q7p\lܸ(4333DEElGK999PTTd;F:x ʰzjRo .DLL BBB --vDWXYYx!qlٲވb;!bذaԩ5CF*));-ZFHǏ# ]ta;\\\`dd$%%z_пX(4 SSS8|';;U@>>>˃=Q;w`ժUذa&OvFP=p8@EEFFF(,,d;!_%$${֭[ i5,--k׮Ռcǎ?>TTT؎CW`022† ؎#'iii#-- fffxlG"1w\Z l!iiiaذa4 rrrՌ#==k֬a; !_O @Phjjٳr ֯_vBe֭HKKQifff8<>~vњ Jl!ǏCFF8T5>:m۶!008|~7lڴ l!͞=AAAlG*>}}a߾}8|0v8 0lGBXX؎CUVVBOO wfϞ4ܹs(|!88(--qE^0p@IZgb̙غu+Mlahmݺ#F̙3vBHDD>>>x|||؎Cw333ý{v ))).~ شiQ/ԩSPPP v$Bo߾Xb쐚vBĉL***333tԉ8)66FFF000'qH-uѣv$BhӦMh۶- D~gfr]kѣHJJovBꔛ)Scǎ ۑH-Nm۶ŵkGPaҢIHHAAA|2q.fffÇَ{T^^-[i`llB\t:`T5uuu\r5kPNZI&aܹXx1Z6`ݛ~>E3gΠcǎl!_@P#ٳ'._[na4%hϟvB)N:2h|;T^^CSS8jڵѣG;08uNoޙHOOs}_ ==:i׮]prrž={hJ9]DPPq8#xxx47nWWW̟? m͹v211N:U4/k=PTTw#@%%%pqqСCakkv@T5ݻw?3َCH [v_V!))W}䗫w#!))3fѣlG&My+>!-Ή'`mm ;;;l޼8p~֭[m۶l! 
&۷xiLǫ*/~=,, GƋ/лwz=1^oK9ƗDEEwވF=:u1vB\r&&&5k $Dc ~jNNN?>8vBjB`` ***`ii,_[H_F6;vG:8::*!!!0119B\3ٲe ֭[e˖,")++z7w0qqqq 0'OOyMzM9ʕ+qqq())A__vvvxqǖ ѭ[7,Y=jPg}RRLLL ''iiiOx_翏s}Oc鐑,,,AVV***ǏkdϵsE@@\n1i$HKKCNN&&&HJJXߓh{]>|aaak׮TRR;vl!ɓa; iV6l`8vBjm6FDDsN 166f233Dfܸq ի>_A_/--dP{.hkk3kܸqLxx8S\\\|6uTw^)++cbbbjg 00aaaLk>yz_}„ ̝;w|ƍ ~=)KǏ̲eO?ĘTnmm`~jhϵ6o޼a8sڵbccyyyC͛7|Ν;̄ j}=jc4w^FEE嫏k)IIIlG!ac[[[4*XqF0lG!3Ljjj ڗW^1Z-ŋOϞ=`j=VXXX^,9sLSSSe[jUU_u@_йs`,,,%a{{{0 9rb֭[v҈bpRCff&Lyy7())ո[ :0j\b0{XEEE neeU\uuuf̩SjШQ}I}|kUڠlߚ+??6.9Nc4ZoooFJJ |fff[?_}o=Vm,,,CCï>%ذa#++[Lø42*Xp8Z¶hFNN177sssGGG[nt՗~}"""5?>֗x]`i\>u|)**jp8L6 AAAݻw1a$%%ʪF,Ϲ궼>ᅬXX?WEEEL4 m{YmM+Gc~FXiiiPUU9III^(Dݻ+W֭[vbo3gӧOGii)ۑbhh;v _|lxx8`Uvʾ鼒>%266ܾ}ݻ==o:q8&jwV>}:5C 2VQQ{gϞ=>|Xoܸ?~|5 aaaHNNz7oެwk =FS?|PUHT򂽽=َC0pppVYƙWݼyeF|8TcmmHKK3ϟ?1j9::2LvvvU~u;0>sIfUeee1]taTUU3g0YYYL~~>hkkW[p| ZTTSZZʤ1 ȨqL^СCU]^t҅qF󘛛3_#ի_z/G3͛7UUuq{C_RVVƴmۖٺu+ݻj] pfĈ1ј`SNcòe55zO%%3ƌLNuyӱcGW^LJJ qRZZ0:::LFFFIOOg̘3mڴazŜ:uu@/7ӷo_FRRc^~]fժU6#**(++3SLa>|Xq_;ϗܿ`QQQFNN۷/ZAAAn:FWWiӦ Č?{7dΝ˴k׎bL$%%}fƏHKK3RRR!z/un?Xlӽ{wa&**144diiifLttwhX|ܿ^QQQflG!*779r$Ӯ];毿b;i'-I||< QVVWBWWHBOOy&؎DH/{ >1}tvfWRRa`̘12e qHHHc̙lG",O$&&ڵ NNNXp@y%-nݺ("??HƎ 555;v(*::]tQ>zzzŋcjNj͙3wALL ={իHIIĉv$B`aaDܽ{(&**E}#F4*9999sGDDv4"CUUHNNƞ={ $D`H233Ü9s;DEEp.];wAi4iii033ݻwc,_XзVW^Ǐ1tP<{XD@q88;;cQVVv,"` ++'NѼ{O<)::? 
XBZP>lmm!߄ 0zh<{ jjjiJaʕ+q)aԬ4+qqq̚5 ~~~lGi4˗/!%%}_~ANN$$$PYYZ.___Ņ u- 1vXDb& IDATDD`l"|0 v|=KG D׮]َDԛ7o`dd|?C a;߇={~aܹsXjM[h|}}~\.ڴiݻcĈ055mgYYYڵ+,X;w69I9s ** 9ۑH+@#@O<C~ vG1b=v$" ---9rЀvލǏtttrrr?򂏏OeZ~=$$$aÆf;'i}?޽{TFC֭=z^۱RTTիWxbXZZbڵ4=44رWWWT攱o/>FJJ 7on<7:۷CFFYIZCWWn7p!HKK2T (,,d;@VŋqU#66HŋQYYY(#@u',,nݺƦY⯿޽{;iWDEEa̘1l"@8x |||pyx!۱8q""##!))ɓlG"---~-BpA4yyyǢEhi0W\APP<YYYcV RexڷoÇc(((`;0jjj{.-[9s`(//g;s|2ڴiSZzDEE1w\ >Y2յYG_YY0n8t>}:۱TөS'ɓ8y$vJݹH?ǏcذaUk6VݻwGPPPs[cʙv,珉lf9'o=€ݻw4*HfΜׯ_2e َE̜9sbvۑ8q"nZ~.:uTmz\\\ؠ_~XhQ|,^ҥ ޾}˗Ӛ1Ҭ"uRPP7oޠw6ŤY޽{6l&O 777 fXZZֺ._ mmNp޽;~F?ڵk]?7oބG >ux޽;Ξ=SNС۱0 &lڴ :t(:ݻa{n899aذa8z(؎ETii) ŋ5fbbbزe |B̯S2!.n$@AR RbPCyItj'[폨pŕ+W0i$F>_4hLeeeAEE___t ݻwȑ#i4Svv6q̜9^^^6:IHm" E!117o˛!z sA||# 9ť)*CJnb3]T h#"n*A;*b2kdOcbbн{w a֬Y8qD#SW5TE Rϛ]a޽055e;!oA l۶ NNN7Fv4" wGa͚5@HH{DEEQQQQ~EFFBBBn݂y𧄄ҥK_}v˳4DCjj*ɓ' ZZZl"$88 ./i]`aaذam{+QHXE)1 ΪY4B.op?6 b?H.П[{TY4 )֨6mk]pÇ[nz^_\.a())SLa;!PD[XXlll]l""!!￱yfAXXX888`Ν3ɝcIha/uL衎Z ,rTICdFƫT0`0f: Ȩ~덾ׯcٲe\\\t/n>L["롬 4JC ?:wv,,Ox |B}k+sM%CRN! QPOkz PVEaY*AQRbPv[hZy%'Í`X4m %%{BSS7oބ7t̙31cCkHFiT߿ڵk~őf +++DFFٙFH'gc{s=`Ɉ١ke#") ϒ9HXw{ALB b,pJQYVRTVŴt1HC }Ք_C ;*{]FA b:X3/фP]9uΝ[g ziӦRRR>.o9;;C^^4ݍ*H|2lmm 6mڴa;صk6n܈]ve;i!W!4:UjlSx {wަ!<.9% oشideeَEHPDLYYv 777(++gϦ Im6i\=NOpU FvUݸ(,@D\LBBBZ **RV $ife(LKGQZJP 1tUO1k`v'%^ԓwn+MSa /ϟ?vqi6#-_ff&~7ӧ۱i*HΝ;w^`ӦM9s&۱(++31i$8pv0o_'Ue Z㹼O#%Up!ꚐЀR3"%% 1 ߃bd!#^|lGA;a|6JJJ ##STҥKN@a۶mعs'455鉱cDzoBi6o߾o =;v؎EǏ1|`۶mXx1XlaL^^I9\WXtT۴!ӭcʇo52! 
-mkiCTqi* %%#/!I?aI'8&Įz^o |6ydBFFI_i.^+W"##7onQD_;;;cƌpss۱H+WYY}aݺuԩ U^ ],T%v|,21 /to(JࠩŹs %%'Ob, Ҝӱab FU{!!!''',^bb9!u,]2228p&Lv$8p%| Ei6Q甯Rt &f@AS T͜充H911h'-a Q#Bɹ>v }R"ir+W^^XhDD#]BZ*H:tڵoSlҤRRRtR\xVVVpssQH>]Tw"v`ӔALSqM*5~H*}ۦ`ccIIXFHCQDZLڵ U`kk qqqV̙3XbJJJŋ>p?6 s?]TuOp+ȴWFC!IE.ax]3b,%MŋXf ޼yKbhV0/Ai222{nxxx]vXz5,YBSH)))۱}vhhhӓŵ`oĊ0XT AqXr,%ع3| |xE9YXgNkLl-"`> 5櫤e{py5 8p ۱iT-998x TTT|ddbcc|r\r'Oƾ}v,*Xz>fApؿF߂̂R,>vǡ]7¢4/otn/cV#_(deqvx)w#=\X 1y?tf),\l߾^^^ƍaeeEFKqqqC ''%K`ʕ i/^`ժUy&N wwwtL_ z^ArlHT?1SWPnNXJںgg#M(CТ15LTxXv<`aX2#$UM`͚5ٍGk8p a`mm {{{()QVnݺUV!::ϧ{A4L Ns8H>{U fq m$5~<"HVT vf2j8_|]3b%U***WWWNNN  `A]]hx Ě5kPVV5k`ʕgU=xbl8T:v~ 9M h4i0)uqt8Dwu|שCh؏G+RSSaaauQBjAiUF`^ZZZlG#Lnn.6oތW^ؾ};ƎvVQ\}40j_y ;,jzCѾw/ $޼Zml=rՋw^bى4JKKqA#''666Xz5TVpwwGRR|r1hy-֬YsaܸqpssÀ؎מ$fbKX aD8Pe1)DµPl/k Y}%9+PԔ>>vB^^V\e˖QCH=}! ,]z*0j(t(**b;"i%t邳g"** bbb8p ƍ/^/P鴇jT@];t62R캈̂j[y =g,l>Ou۷/֮] ccc|TROTVMHHcǎEHH?ر#lmmvDJ!!!~:rrrп̚5 %J1 Jjk~*y<9| 'AgȪ4624e  +1=i0y S02L4"*# Cjj*FEĄFwɁ'ك6m˗/XI^W6mvGo`w#Fn7(Fl_{QBvb?8OR–ӧعs'{{{̟?bbb_2!A !ҡCY 8fϞ ---8;;###툄)**ј>}:֯_^zlkv)}4a7zs+=܅r~TI%%hGorDtc\7YJ2q\=z  As!66TD"U޼ySSS߿5knܸ<%JMM ;^~ ,\ݺuÑ#GPQQvfK]ըjS 1 jh$ "CSHvʱ1&,|ocq9K [xxx@WWPUUŽ{L2BBDΝ; طo1n8 4b;"SC\\~'PSSQ\\}q)2 '8]OJ?-/{Bn-[>_QGo7"//wFnݰj*>DHHv4ر7nugggdzuuuxyy!..fºu렩u!==x%[\{_QՊBF=idd>r$|#Graaaekᆪ*7?3I&$P:TAY\uຶuuq]bHB =$^&ei?k5p'9''e3u}݇]vپX[[cʕ'|TDGGcرc]^鹟ssZ!<ƌee=kX^{{>c,Z999غu+b",,,xb޽Xf 0k,8;;cɒ%رc]SSS,Y)))/Ѐ3g"66[nN.l=}T)7 kww;[AfLjQ *lxh$,q%%%x)S7nDEEV^ 3QO[DPVV͛7c͈̙ŋcԨQ<^Ν;zj:tXr%-[ft&ȭnBҋs`"~*ACE{'=.Z9n6Ƕ8j,d"6yuz0Lzciiz˖-lnD= GDၕ+Wرc8s /_={ 66xW*vd$ f̘";;wuyFVq|k~);s OJI>=a~z3 9zW5b{_3#ܮƇDjo"$$'NDvv6>C7`!8Dd$z=;M6a֭(--EPP̙9s`СbH=@YY>#[Jʕ+1lذ|yg;dR)~yj68}&A^דhDk h0_ݕKAk `i&R /Jc߾}xQPPu1"2rzزe ~024|p\Dg>}>;( ,\?8ï:_{{?]gj:᫶'\9@E v/<.- ʎxT~&lذ6l@II Ǝ+V`֬YsfB^#@DFN" ::oΟ?<{?(ngHpBESnoae#ށT7-[wMz߿χ/_wwwšBBBga"ȑ#hjj–-[~AAApssÒ%Kyf477. 
T3f`߾}’%K~zc 6@*BCG>xյ*ڰ)1&,EJq \%F)(SS|t0]P|\Z=ΣK/aܹ(,,Ć ܷR0[nn.{=/Ą  #Q_gz>|۷oǎ;PXX[[[?'Oɓ#vdDa]5jVX{%%%}e2aU[[˞ﭸd ޺K뻌giMCƭY4Eg~jq+Ǽyp!hZ3 Dj4lݺׯaoo˗c 鉨c"RRRsNĉPƔ)S0yd;bIF [lAkkeC\\|}}tz=f{GeX&ÖOӨTH-ޛ#%ܞ\Yk㐽^\tL6 F"| KTWWcʔ)Xd fΜ 37ED`"spq߿GRRR) &`9r$o!̝;۷ov7\ رRLxw2^K<);U}>'e2_kit:[,m۶aѢEP՝&@NNݱtR< BD= Q8СChii&NcܸqpwwM憎+gbbT> ?`^Vǰ_ZgM` ?TW!k6xn+6%!﵅ /୷pa2133æMp]wqy"% ;wDmm-ߏE!55K.Oয়~Bcc-mqFóWjVxb+P5z /okڃFʔlz4A;a*l+1%rŴipW\cڴi駟neD15ÇqA|8ƌѣGcĈUI ,@FFj5hV+0{oܞ>AnmE7`_&AK?%FǮP__娮rB"" "a8x 9G"33&&&:t(F1c`ԨQprrUfJR𻱩 S>؍gEX1C*_؁^qڱBFګ\N\\&M]-Q/DDݦpܱcǐV~nnUy}[ߦ|ZYUZN'nmpz=?r99%uIv]n}.$''!z'PWW 6 FBdd$F {{{;>Vڡs5ZȘ8>Gϗc|' OAj"wHD ![F&!22+WĦMPQQZ NNNذafΜ 777 >O>${:݀%)ZM~"uE vK/'xD^%Zo68"pn\`ʕ2$$$ >> XnT*<==(DFF"22"_uETA5"(=]I{x5'P3+ #CDD݁DٳgcZ$$$$_ņ /Bjk*.;?ҕY{y ## 0xY @DԽȨ~袆$&&ԩSHLLڵkQ\\ P4d <nnnbgVN6RFZI-| 3 q<wE\$ed+s+kz% "2zvvvW/4D|c|k׮EKK $ |||P 0AAAHWb1S8!ERq|5WX G5J!,svuqc""  AHHHm Avv6-[ //㽽~2СCO駟bСx1{쫎^*ŶSLaV-NG]LԋvZZ#%-PkuǗDԳ1"vvvFtttm())1V29ryyyχ^[ßakk7fffhooٳgpBc=.fthjZq] =c֮D u[Y!\P󴳄VGR%x6F1b.~ߏä R0 nnn߮W: k^ýދ TJ!U) UNqtvMPުif"DDd䄨nB~~> PVVTVV"33DII!$ 0TVVF.6mڄocǎSO=ӧCSnQAPt:ѕ Դ?ԉnQtOGGjjjP^^2|ǣ 1vرc8tBBBЊǠU`)J%'@#S(@B!왽 ZEz  "^p[\dddԠ3f@SSU{qD(33}Q#/{ӛ[-/=#HM`=~5uU([2 2{g؎ ^Zgs.'7^101ilBn~&TJ-fkDԋ0qoS*W j:{L8;WV Ը>j/Ź!UX pvXE- _{*4v|W} #z _#$X_y9'(3@sVc!mvVP7I;Ոn$""z444t@"@" <<O>$mۆj<: FotQWU67a=&ְ:s2: `3|V~O|T|U_;q#k5ߦu3 u׉DV T ok888`Xv-Ν;T1caJTMVPV ) VG_v;# :xdU_;q#k5G 9DDo#""H$2e ƍpH$׶fw\ 4dvguU)^{sȹZ ""-83忱7@3oevNPUBPUvdPWb/u0sȹZEj]э-pDDt.>g62a1Uca3|A%51 <}Ӷ3Gz:U+9йZ^{=c&<u ""aZu _J?|S@ڌxex_07EiUƣ;QRx>U_za %$堑7tVc^L#@D}x 0+ &4:u{ SԷ•fJ{矚 H$8>ϯE0߹8!OEGU d6׾eĥ?s#g>DDΣ` |azXh~{[\ZXWӣCk\说7٣ew`at>k^moܸ+M$Ts. U'NeZ OCE쎈z GDD7L"m}mpR}JҨ`..wuc""heaqR@j#4mmpC`!DD׍n%J[5_'+J:JT"ZP+oD Ws"ކn5 jjh;83m@5𲷄%" "")>V(m.|mo(];U޹V _G. "~ @DDtS|PT &wD UCQOnkC{{; J8ZF @DDtSBѮ"P3 mu"vF=I[]-`p?GA=bDDݔ0{H%H)!^hgkVWGks8nfZ bgD0M1°3k? 
;0Mt@rI6-hn(KTU tRJhH]QoDDD7m+s+y02͕5F= M&ϭ"ފnW4!ep@KeQORUD?a9vS(""Y @DDtӆz;\.C|n>~T"uE= a05ԊQЊW81MHc煷M臦zt(u3##90I,RWD[1QRTb#؄&LN-Ƙ4V @DD-CES+Ζj2)wX\K3S tVG\F13"[ r9Kh,*]Ș5!0]Hru-u#n u D"L~_H$h,(12ZJ%poL|QuQ8YP&CFa!h32F yyRb|ߝł;#ތ͸`8[+%)_P_>2 Ŝ . 粰8&f2C9J̋DD7L*]!I83c->'Rgdl+*Ј壂ͧhN"uFDu? Ʃj$ g[Ɯɦ>.'!Ӯld$#^Ul7G?Uل":#ciSy<:6DPߞ\NBDD݉]y5Z=&1JKB? ?Ƹ Ox[ @DD햎-J:!h3NALsPϩjĞb<<& G< ""v6X2?ٛ쉁Py&IHLwh؝a;{Sl{ @DDtKm@V7aGpgE-"uFbѪը>s+LJPlj'H9b @DDtKܑ IDATa|'VKԧyc3G H<5apMMd:#n͌đr(1$ൻP_T eYioGuY<1. 6zRK3Z!C"+ixyG>5&CYq:۩)ؘӆ H +E5 @DDtKsP$Ua_f646;[vQ546+ 5T0SۢšCXɮp"DDDT ⋧HVwiF^X>*NA.btK遲.pӰ2;Ej" ""_0竚p؛JQv"^V>2|I/}8]X3mDDD\?{+#JBCkn0Ż&'r;[AӮBթ_D: }ID8aATHQ_DDD? T"s?*/ p9 Z-Rwt+;[S1{8U?Dt1makni\Aɱc"uGݭ&+ u575+wDZP pC"趙=SᱍѦNV ѨA}~RwhW*Qg' ¨7g$Ό;"Zh4[pwşcCPr0T"uG7Kՠh>Dxa(㫄`$,L8ѭDDD5>[:LBB @Ș> V\1 rK3 k/c`̏C"趛Qx루V uGzBƦ*= ybXX:xw;$"b"""o R/.8Zᇇ&eN!]Ƣ"8U3#15`PvY> H]DDD0Sp4/nqdT$E͹,:kZ[_`ǴmӊxH!% @DD$^x{n ފKb%Q(>r 4HW؄=1:l+o!~xdLH 1# `'^V/ۤۏ.@biW*kzbIZ5{M-1zDd,Htށh_gLzJ[޼'P!w 2 raFÜu{QlÁS^`"""Mai*u~iTذd, A=h,.™vhW*c;<ip4lɮ&R..??>Jܳv/5Z6iqӾMU߀o#ѧgU;t|NwK"Iz.@DD#cَdL tz=8Ʉ׈p Ӿ{nOLp=gSOwbѰ@$"2Qj ރ#Ay,p}ǝ0s[BMByi<1. 
Sw_ ْ7(ѵa"""u S?؍a.DXOU{$(lmEwҪ(>tEQx(6>v&_;OE @DDdRK0`N`{iC fهx}@H.(l pltxށbx @DDd[1=hjŮǦbp?GvZg$Cpϑ#!-q7FTI p74uCkۋ:lt2g{#zvZieuvxLb,0ڤ2x\J%J ͌ӓvz' o/j۱)و-1|q"fFᅩCVoaKRG4SHGSvKve.>.6I"D0Qtu L gKҬ>?$oC5* Ρ!H5QX9>2Vǫ$,kB!7DDƍz9XH$ĝAmQᥟh&,6boM{c#*NDM^>f  FѺ~JA.;8l @DDcն#ؑR' ĿfD]vd"O|ΕnѰtvcu"4jd.׿్ag>D+0Qkg- v'Kt}%xa["NV`~}{&T\!X62rη7㑯awz<:i &"z%(gbxuf4,L/Δ"sNW2ak.@MZ* iog' C!0u)X{8/l; Wso w'c""^嫄3ʚ:y:a~eG|HN9.āx鮡0sԇz ""u:$|x0 =ay5Mp<?Vغv~[ZoNFcQ1 TT 7GuyK T5⥟t.G󆣿m윈b""^++~J.@l;V͌]/s԰que?y{Ѹ&hiAcQ!EEPa|E~VȻ<6 J a Ǥ΋6 @DD,?%b_f w3N^F%؞RElh%`+77C"} +\Q tTUfꉙ}p`[t^Kj| ?'kkFGwy QoDDD}r KnEaivg]Kj?Gr*p,u6Haio0 l!37j5:ޤD{sT PףP0cpw򹞋tz=f؛QPw{<;y )QoDDD}Zw?· #cBaȭnBRQ  uhhNwUTB &f :\" v4Ph j(oG roGDx:\shRYȫiW<7ef Y @DDg7_s!2QG>ZCQ]3kljC] u-mnGFv5Za{ 3H%p4~N7Nc[~J.?榸oX,^}>"ކ@B^=yNJنa.C!&R1ɮlD\F1K?R3ycѰ@L 󆩌]DDD;Z3KCRvf;芡N>;]وc+p|9+GA 9&xb`߫GDԗ1uA0iE8t P԰4!=1aweU)PXیz!%V 7bFajx?s0]#N:V"uH/G 7Vp69p2̤`TӬBR6T5PЌf5Snvp@F"""nRQ]3QPDam3[P؆6T+UiVAӣkkn D{ 3\J xY Vq0!զ&"kDDDDDD}N""""">  """""3d6Ii(v| zVIENDB`glance-12.0.0/doc/source/cache.rst0000664000567000056710000001637412701407047020043 0ustar jenkinsjenkins00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The Glance Image Cache ====================== The Glance API server may be configured to have an optional local image cache. 
A local image cache stores a copy of image files, essentially enabling multiple API servers to serve the same image file, resulting in an increase in scalability due to an increased number of endpoints serving an image file. This local image cache is transparent to the end user -- in other words, the end user doesn't know that the Glance API is streaming an image file from its local cache or from the actual backend storage system. Managing the Glance Image Cache ------------------------------- While image files are automatically placed in the image cache on successful requests to ``GET /images/``, the image cache is not automatically managed. Here, we describe the basics of how to manage the local image cache on Glance API servers and how to automate this cache management. Configuration options for the Image Cache ----------------------------------------- The Glance cache uses two files: one for configuring the server and another for the utilities. The ``glance-api.conf`` is for the server and the ``glance-cache.conf`` is for the utilities. The following options are in both configuration files. These need the same values otherwise the cache will potentially run into problems. - ``image_cache_dir`` This is the base directory where Glance stores the cache data (Required to be set, as does not have a default). - ``image_cache_sqlite_db`` Path to the sqlite file database that will be used for cache manangement. This is a relative path from the ``image_cache_dir`` directory (Default:``cache.db``). - ``image_cache_driver`` The driver used for cache management. (Default:``sqlite``) - ``image_cache_max_size`` The size when the glance-cache-pruner will remove the oldest images, to reduce the bytes until under this value. (Default:``10 GB``) - ``image_cache_stall_time`` The amount of time an incomplete image will stay in the cache, after this the incomplete image will be deleted. 
(Default:``1 day``) The following values are the ones that are specific to the ``glance-cache.conf`` and are only required for the prefetcher to run correctly. - ``admin_user`` The username for an admin account, this is so it can get the image data into the cache. - ``admin_password`` The password to the admin account. - ``admin_tenant_name`` The tenant of the admin account. - ``auth_url`` The URL used to authenticate to keystone. This will be taken from the environment varibles if it exists. - ``filesystem_store_datadir`` This is used if using the filesystem store, points to where the data is kept. - ``filesystem_store_datadirs`` This is used to point to multiple filesystem stores. - ``registry_host`` The URL to the Glance registry. Controlling the Growth of the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The image cache has a configurable maximum size (the ``image_cache_max_size`` configuration file option). The ``image_cache_max_size`` is an upper limit beyond which pruner, if running, starts cleaning the images cache. However, when images are successfully returned from a call to ``GET /images/``, the image cache automatically writes the image file to its cache, regardless of whether the resulting write would make the image cache's size exceed the value of ``image_cache_max_size``. In order to keep the image cache at or below this maximum cache size, you need to run the ``glance-cache-pruner`` executable. The recommended practice is to use ``cron`` to fire ``glance-cache-pruner`` at a regular interval. Cleaning the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~ Over time, the image cache can accumulate image files that are either in a stalled or invalid state. Stalled image files are the result of an image cache write failing to complete. Invalid image files are the result of an image file not being written properly to disk. To remove these types of files, you run the ``glance-cache-cleaner`` executable. 
The recommended practice is to use ``cron`` to fire ``glance-cache-cleaner`` at a semi-regular interval. Prefetching Images into the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some installations have base (sometimes called "golden") images that are very commonly used to boot virtual machines. When spinning up a new API server, administrators may wish to prefetch these image files into the local image cache to ensure that reads of those popular image files come from a local cache. To queue an image for prefetching, you can use one of the following methods: * If the ``cache_manage`` middleware is enabled in the application pipeline, you may call ``PUT /queued-images/`` to queue the image with identifier ```` Alternately, you can use the ``glance-cache-manage`` program to queue the image. This program may be run from a different host than the host containing the image cache. Example usage:: $> glance-cache-manage --host= queue-image This will queue the image with identifier ```` for prefetching Once you have queued the images you wish to prefetch, call the ``glance-cache-prefetcher`` executable, which will prefetch all queued images concurrently, logging the results of the fetch for each image. Finding Which Images are in the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can find out which images are in the image cache using one of the following methods: * If the ``cachemanage`` middleware is enabled in the application pipeline, you may call ``GET /cached-images`` to see a JSON-serialized list of mappings that show cached images, the number of cache hits on each image, the size of the image, and the times they were last accessed. Alternately, you can use the ``glance-cache-manage`` program. This program may be run from a different host than the host containing the image cache. 
Example usage:: $> glance-cache-manage --host= list-cached * You can issue the following call on \*nix systems (on the host that contains the image cache):: $> ls -lhR $IMAGE_CACHE_DIR where ``$IMAGE_CACHE_DIR`` is the value of the ``image_cache_dir`` configuration variable. Note that the image's cache hit is not shown using this method. Manually Removing Images from the Image Cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the ``cachemanage`` middleware is enabled, you may call ``DELETE /cached-images/`` to remove the image file for image with identifier ```` from the cache. Alternately, you can use the ``glance-cache-manage`` program. Example usage:: $> glance-cache-manage --host= delete-cached-image glance-12.0.0/doc/source/artifact-types.rst0000664000567000056710000000136012701407047021724 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Glance Artifact Types ===================== .. list-plugins:: glance.artifacts.types :detailed: glance-12.0.0/doc/source/architecture.rst0000664000567000056710000000455512701407051021453 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== Basic architecture ================== OpenStack Glance has a client-server architecture that provides a REST API to the user through which requests to the server can be performed. A Glance Domain Controller manages the internal server operations that is divided into layers. Specific tasks are implemented by each layer. All the file (Image data) operations are performed using glance_store library, which is responsible for interaction with external storage back ends and (or) local filesystem(s). The glance_store library provides a uniform interface to access the backend stores. Glance uses a central database (Glance DB) that is shared amongst all the components in the system and is sql-based by default. Other types of database backends are somewhat supported and used by operators but are not extensively tested upstream. .. figure:: /images/architecture.png :figwidth: 100% :align: center :alt: OpenStack Glance Architecture .. centered:: Image 1. OpenStack Glance Architecture Following components are present in the Glance architecture: * **A client** - any application that makes use of a Glance server. * **REST API** - Glance functionalities are exposed via REST. * **Database Abstraction Layer (DAL)** - an application programming interface (API) that unifies the communication between Glance and databases. * **Glance Domain Controller** - middleware that implements the main Glance functionalities such as authorization, notifications, policies, database connections. * **Glance Store** - used to organize interactions between Glance and various data stores. 
* **Registry Layer** - optional layer that is used to organise secure communication between the domain and the DAL by using a separate service. glance-12.0.0/doc/source/authentication.rst0000664000567000056710000001135612701407051022005 0ustar jenkinsjenkins00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Authentication With Keystone ============================ Glance may optionally be integrated with Keystone. Setting this up is relatively straightforward, as the Keystone distribution includes the necessary middleware. Once you have installed Keystone and edited your configuration files, newly created images will have their `owner` attribute set to the tenant of the authenticated users, and the `is_public` attribute will cause access to those images for which it is `false` to be restricted to only the owner, users with admin context, or tenants/users with whom the image has been shared. Configuring the Glance servers to use Keystone ---------------------------------------------- Keystone is integrated with Glance through the use of middleware. The default configuration files for both the Glance API and the Glance Registry use a single piece of middleware called ``unauthenticated-context``, which generates a request context containing blank authentication information. In order to configure Glance to use Keystone, the ``authtoken`` and ``context`` middlewares must be deployed in place of the ``unauthenticated-context`` middleware. 
The ``authtoken`` middleware performs the authentication token validation and retrieves actual user authentication information. It can be found in the Keystone distribution. Configuring Glance API to use Keystone -------------------------------------- Configuring Glance API to use Keystone is relatively straight forward. The first step is to ensure that declarations for the two pieces of middleware exist in the ``glance-api-paste.ini``. Here is an example for ``authtoken``:: [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory auth_url = http://localhost:35357 project_domain_id = default project_name = service_admins user_domain_id = default username = glance_admin password = password1234 The actual values for these variables will need to be set depending on your situation. For more information, please refer to the Keystone `documentation`_ on the ``auth_token`` middleware, but in short: .. _documentation http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration * The ``auth_url`` variable points to the Keystone service. This information is used by the middleware to actually query Keystone about the validity of the authentication tokens. * The auth credentials (``project_name``, ``project_domain_id``, ``user_domain_id``, ``username``, and ``password``) will be used to retrieve a service token. That token will be used to authorize user tokens behind the scenes. Finally, to actually enable using Keystone authentication, the application pipeline must be modified. By default, it looks like:: [pipeline:glance-api] pipeline = versionnegotiation unauthenticated-context apiv1app Your particular pipeline may vary depending on other options, such as the image cache. 
This must be changed by replacing ``unauthenticated-context`` with ``authtoken`` and ``context``:: [pipeline:glance-api] pipeline = versionnegotiation authtoken context apiv1app Configuring Glance Registry to use Keystone ------------------------------------------- Configuring Glance Registry to use Keystone is also relatively straight forward. The same middleware needs to be added to ``glance-registry-paste.ini`` as was needed by Glance API; see above for an example of the ``authtoken`` configuration. Again, to enable using Keystone authentication, the appropriate application pipeline must be selected. By default, it looks like:: [pipeline:glance-registry-keystone] pipeline = authtoken context registryapp To enable the above application pipeline, in your main ``glance-registry.conf`` configuration file, select the appropriate deployment flavor by adding a ``flavor`` attribute in the ``paste_deploy`` group:: [paste_deploy] flavor = keystone .. note:: If your authentication service uses a role other than ``admin`` to identify which users should be granted admin-level privileges, you must define it in the ``admin_role`` config attribute in both ``glance-registry.conf`` and ``glance-api.conf``. glance-12.0.0/doc/source/controllingservers.rst0000664000567000056710000002734012701407047022737 0ustar jenkinsjenkins00000000000000.. Copyright 2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Controlling Glance Servers ========================== This section describes the ways to start, stop, and reload Glance's server programs. Starting a server ----------------- There are two ways to start a Glance server (either the API server or the registry server): * Manually calling the server program * Using the ``glance-control`` server daemon wrapper program We recommend using the second method. Manually starting the server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first is by directly calling the server program, passing in command-line options and a single argument for a ``paste.deploy`` configuration file to use when configuring the server application. .. note:: Glance ships with an ``etc/`` directory that contains sample ``paste.deploy`` configuration files that you can copy to a standard configuration directory and adapt for your own uses. Specifically, bind_host must be set properly. If you do `not` specify a configuration file on the command line, Glance will do its best to locate a configuration file in one of the following directories, stopping at the first config file it finds: * ``$CWD`` * ``~/.glance`` * ``~/`` * ``/etc/glance`` * ``/etc`` The filename that is searched for depends on the server application name. So, if you are starting up the API server, ``glance-api.conf`` is searched for, otherwise ``glance-registry.conf``. If no configuration file is found, you will see an error, like:: $> glance-api ERROR: Unable to locate any configuration file. 
Cannot load application glance-api Here is an example showing how you can manually start the ``glance-api`` server and ``glance-registry`` in a shell.:: $ sudo glance-api --config-file glance-api.conf --debug & jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: 2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf 2011-04-13 14:50:12 DEBUG [glance-api] ================================================ 2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29 2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292 2011-04-13 14:50:12 DEBUG [glance-api] debug True 2011-04-13 14:50:12 DEBUG [glance-api] default_store file 2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/ 2011-04-13 14:50:12 DEBUG [glance-api] registry_host 65.114.169.29 2011-04-13 14:50:12 DEBUG [glance-api] registry_port 9191 2011-04-13 14:50:12 DEBUG [glance-api] verbose False 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** 2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True 2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/ $ sudo glance-registry --config-file glance-registry.conf & jsuh@mc-ats1:~$ 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("images") 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', 
u'DATETIME', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'name', u'VARCHAR(255)', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'disk_format', u'VARCHAR(20)', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'container_format', u'VARCHAR(20)', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (8, u'size', u'INTEGER', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (9, u'status', u'VARCHAR(30)', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (10, u'is_public', u'BOOLEAN', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (11, u'location', u'TEXT', 0, None, 0) 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("image_properties") 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] 
Row (4, u'id', u'INTEGER', 1, None, 1) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'image_id', u'INTEGER', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'key', u'VARCHAR(255)', 1, None, 0) 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'value', u'TEXT', 0, None, 0) $ ps aux | grep glance root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug root 20012 2.0 0.1 25188 13356 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-registry glance-registry.conf jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance Simply supply the configuration file as the parameter to the ``--config-file`` option (the ``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample configuration files were used in the above example) and then any other options you want to use. (``--debug`` was used above to show some of the debugging output that the server shows when starting up. Call the server program with ``--help`` to see all available options you can specify on the command line.) For more information on configuring the server via the ``paste.deploy`` configuration files, see the section entitled :doc:`Configuring Glance servers ` Note that the server `daemonizes` itself by using the standard shell backgrounding indicator, ``&``, in the previous example. For most use cases, we recommend using the ``glance-control`` server daemon wrapper for daemonizing. See below for more details on daemonization with ``glance-control``. Using the ``glance-control`` program to start the server ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The second way to start up a Glance server is to use the ``glance-control`` program. ``glance-control`` is a wrapper script that allows the user to start, stop, restart, and reload the other Glance server programs in a fashion that is more conducive to automation and scripting. 
Servers started via the ``glance-control`` program are always `daemonized`, meaning that the server program process runs in the background. To start a Glance server with ``glance-control``, simply call ``glance-control`` with a server and the word "start", followed by any command-line options you wish to provide. Start the server with ``glance-control`` in the following way:: $> sudo glance-control [OPTIONS] start [CONFPATH] .. note:: You must use the ``sudo`` program to run ``glance-control`` currently, as the pid files for the server programs are written to /var/run/glance/ Here is an example that shows how to start the ``glance-registry`` server with the ``glance-control`` wrapper script. :: $ sudo glance-control api start glance-api.conf Starting glance-api with /home/jsuh/glance.conf $ sudo glance-control registry start glance-registry.conf Starting glance-registry with /home/jsuh/glance.conf $ ps aux | grep glance root 20038 4.0 0.1 12728 9116 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf root 20039 6.0 0.1 25188 13356 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-registry /home/jsuh/glance-registry.conf jsuh 20042 0.0 0.0 3368 744 pts/1 S+ 12:51 0:00 grep glance The same configuration files are used by ``glance-control`` to start the Glance server programs, and you can specify (as the example above shows) a configuration file when starting the server. In order for your launched glance service to be monitored for unexpected death and respawned if necessary, use the following option: $ sudo glance-control [service] start --respawn ... Note that this will cause ``glance-control`` itself to remain running. Also note that deliberately stopped services are not respawned, neither are rapidly bouncing services (where process death occurred within one second of the last launch). By default, output from glance services is discarded when launched with ``glance-control``. 
In order to capture such output via syslog, use the following option: $ sudo glance-control --capture-output ... Stopping a server ----------------- If you started a Glance server manually and did not use the ``&`` backgrounding function, simply send a terminate signal to the server process by typing ``Ctrl-C`` If you started the Glance server using the ``glance-control`` program, you can use the ``glance-control`` program to stop it. Simply do the following:: $> sudo glance-control stop as this example shows:: $> sudo glance-control registry stop Stopping glance-registry pid: 17602 signal: 15 Restarting a server ------------------- You can restart a server with the ``glance-control`` program, as demonstrated here:: $> sudo glance-control registry restart etc/glance-registry.conf Stopping glance-registry pid: 17611 signal: 15 Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance-registry.conf Reloading a server ------------------- You can reload a server with the ``glance-control`` program, as demonstrated here:: $> sudo glance-control api reload Reloading glance-api (pid 18506) with signal(1) A reload sends a SIGHUP signal to the master process and causes new configuration settings to be picked up without any interruption to the running service (provided neither bind_host or bind_port has changed). glance-12.0.0/doc/source/statuses.rst0000664000567000056710000000577112701407047020652 0ustar jenkinsjenkins00000000000000.. Copyright 2010 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Image Statuses ============== Images in Glance can be in one the following statuses: * ``queued`` The image identifier has been reserved for an image in the Glance registry. No image data has been uploaded to Glance and the image size was not explicitly set to zero on creation. * ``saving`` Denotes that an image's raw data is currently being uploaded to Glance. When an image is registered with a call to `POST /images` and there is an `x-image-meta-location` header present, that image will never be in the `saving` status (as the image data is already available in some other location). * ``active`` Denotes an image that is fully available in Glance. This occurs when the image data is uploaded, or the image size is explicitly set to zero on creation. * ``deactivated`` Denotes that access to image data is not allowed to any non-admin user. Prohibiting downloads of an image also prohibits operations like image export and image cloning that may require image data. * ``killed`` Denotes that an error occurred during the uploading of an image's data, and that the image is not readable. * ``deleted`` Glance has retained the information about the image, but it is no longer available to use. An image in this state will be removed automatically at a later date. * ``pending_delete`` This is similar to `deleted`, however, Glance has not yet removed the image data. An image in this state is not recoverable. .. figure:: /images/image_status_transition.png :figwidth: 100% :align: center :alt: Image status transition This is a representation of how the image move from one status to the next. * Add location from zero to more than one. Task Statuses ============== Tasks in Glance can be in one the following statuses: * ``pending`` The task identifier has been reserved for a task in the Glance. No processing has begun on it yet. 
* ``processing`` The task has been picked up by the underlying executor and is being run using the backend Glance execution logic for that task type. * ``success`` Denotes that the task has had a successful run within Glance. The ``result`` field of the task shows more details about the outcome. * ``failure`` Denotes that an error occurred during the execution of the task and it cannot continue processing. The ``message`` field of the task shows what the error was. glance-12.0.0/doc/source/tasks.rst0000664000567000056710000001562612701407047020124 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Tasks ============== Conceptual Overview ------------------- Image files can be quite large, and processing images (converting an image from one format to another, for example) can be extremely resource intensive. Additionally, a one-size-fits-all approach to processing images is not desirable. A public cloud will have quite different security concerns than, for example, a small private cloud run by an academic department in which all users know and trust each other. Thus a public cloud deployer may wish to run various validation checks on an image that a user wants to bring in to the cloud, whereas the departmental cloud deployer may view such processing as a waste of resources. To address this situation, Glance contains *tasks*. 
Tasks are intended to offer end users a front end to long running asynchronous operations -- the type of operation you kick off and don't expect to finish until you've gone to the coffee shop, had a pleasant chat with your barista, had a coffee, had a pleasant walk home, etc. The asynchronous nature of tasks is emphasized up front in order to set end user expectations with respect to how long the task may take (hint: longer than other Glance operations). Having a set of operations performed by tasks allows a deployer flexibility with respect to how many operations will be processed simultaneously, which in turn allows flexibility with respect to what kind of resources need to be set aside for task processing. Thus, although large cloud deployers are certainly interested in tasks for the alternative custom image processing workflow they enable, smaller deployers find them useful as a means of controlling resource utilization. An additional reason tasks have been introduced into Glance is to support Glance's role in the OpenStack ecosystem. Glance provides cataloging, storage, and delivery of virtual machine images. As such, it needs to be responsive to other OpenStack components. Nova, for instance, requests images from Glance in order to boot instances; it uploads images to Glance as part of its workflow for the Nova image-create action; and it uses Glance to provide the data for the image-related API calls that are defined in the Compute API that Nova instantiates. It is necessary to the proper functioning of an OpenStack cloud that these synchronous operations not be compromised by excess load caused by non-essential functionality such as image import. By separating the tasks resource from the images resource in the Images API, it's easier for deployers to allocate resources and route requests for tasks separately from the resources required to support Glance's service role. At the same time this separation avoids confusion for users of an OpenStack cloud. 
Responses to requests to ``/v2/images`` should return fairly quickly, while requests to ``/v2/tasks`` may take a while. In short, tasks provide a common API across OpenStack installations for users of an OpenStack cloud to request image-related operations, yet at the same time tasks are customizable for individual cloud providers. Conceptual Details ------------------ A Glance task is a request to perform an asynchronous image-related operation. The request results in the creation of a *task resource* that can be polled for information about the status of the operation. A specific type of resource distinct from the traditional Glance image resource is appropriate here for several reasons: * A dedicated task resource can be developed independently of the traditional Glance image resource, both with respect to structure and workflow. * There may be multiple tasks (for example, image export or image conversion) operating on an image simultaneously. * A dedicated task resource allows for the delivery to the end user of clear, detailed error messages specific to the particular operation. * A dedicated task resource respects the principle of least surprise. For example, an import task does not create an image in Glance until it's clear that the bits submitted pass the deployer's tests for an allowable image. Upon reaching a final state (``success`` or ``error``) a task resource is assigned an expiration datetime that's displayed in the ``expires_at`` field. (The time between final state and expiration is configurable.) After that datetime, the task resource is subject to being deleted. The result of the task (for example, an imported image) will still exist. For details about the defined task statuses, please see :doc:`Task Statuses ` Tasks expire eventually because there's no reason to keep them around, as the user will have the result of the task, which was the point of creating the task in the first place. 
The reason tasks aren't instantly deleted is that there may be information contained in the task resource that's not easily available elsewhere. (For example, a successful import task will eventually result in the creation of an image in Glance, and it would be useful to know the UUID of this image. Similarly, if the import task fails, we want to give the end user time to read the task resource to analyze the error message.) Task Entities ------------- A task entity is represented by a JSON-encoded data structure defined by the JSON schema available at ``/v2/schemas/task``. A task entity has an identifier (``id``) that is guaranteed to be unique within the endpoint to which it belongs. The id is used as a token in request URIs to interact with that specific task. In addition to the usual properties you'd expect (for example, ``created_at``, ``self``, ``type``, ``status``, ``updated_at``, etc.), tasks have these properties of interest: * ``input``: this is defined to be a JSON blob, the exact content of which will depend upon the requirements set by the specific cloud deployer. The intent is that each deployer will document these requirements for end users. * ``result``: this is also defined to be a JSON blob, the content of which will be documented by each cloud deployer. The ``result`` element will be null until the task has reached a final state, and if the final status is ``failure``, the result element remains null. * ``message``: this string field is expected to be null unless the task has entered ``failure`` status. At that point, it contains an informative human-readable message concerning the reason(s) for the task failure. glance-12.0.0/doc/source/image-location-strategy-modules.rst0000664000567000056710000000143312701407047025164 0ustar jenkinsjenkins00000000000000.. Copyright 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Image Location Strategy Modules =============================== .. list-plugins:: glance.common.image_location_strategy.modules :detailed: glance-12.0.0/doc/source/images_src/0000775000567000056710000000000012701407204020342 5ustar jenkinsjenkins00000000000000glance-12.0.0/doc/source/images_src/glance_db.graphml0000664000567000056710000003010612701407047023621 0ustar jenkinsjenkins00000000000000 Images id: varchar(36), primary name: varchar(255), nullable size: bigint(20), nullable status: varchar(30) is_public: tinyint(1) created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) disk_format: varchar(20), nullable container_format: varchar(20), nullable checksum: varchar(32), nullable owner: varchar(255), nullable min_disk: int(11) min_ram: int(11) protected: tinyint(1) virtual_size: bigint(20), nullable image_locations id: int(11), primary image_id: varchar(36) value: text created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) meta_data: text, nullable status: varchar(30) image_members id: int(11), primary image_id: varchar(36) member: varchar(255) can_share: tiny_int(1) created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) status: varchar(20) image_properties id: int(11), primary image_id: varchar(36) name: varchar(255) value: text, nullable created_at: datetime updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) image_tags id: int(11), primary image_id: varchar(36) value: varchar(255) created_at: datetime 
updated_at: datetime, nullable deleted_at: datetime, nullable deleted: tinyint(1) glance-12.0.0/doc/source/images_src/glance_layers.graphml0000664000567000056710000005162612701407047024545 0ustar jenkinsjenkins00000000000000 Domain Router api/v2/router.py REST API api/v2/* Auth api/authorization.py Notifier notifier.py Policy api/policy.py Quota quota/__init__.py Location location.py DB db/__init__.py Registry (optional) registry/v2/* Data Access db/sqlalchemy/api.py A Client Glance Store DBMS Property protection (optional) api/property_protections.py glance-12.0.0/doc/source/images_src/image_status_transition.dot0000664000567000056710000000323712701407047026023 0ustar jenkinsjenkins00000000000000/* # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. */ /* This file can be compiled by graphviz with issuing the following command: dot -Tpng -oimage_status_transition.png image_status_transition.dot See http://www.graphviz.org to get more info. 
*/ digraph { node [shape="doublecircle" color="#006699" style="filled" fillcolor="#33CCFF" fixedsize="True" width="1.5" height="1.5"]; "" -> "queued" [label="create image"]; "queued" -> "active" [label="add location*"]; "queued" -> "saving" [label="upload"]; "queued" -> "deleted" [label="delete"]; "saving" -> "active" [label="upload succeed"]; "saving" -> "killed" [label="[v1] upload fail"]; "saving" -> "queued" [label="[v2] upload fail"]; "saving" -> "deleted" [label="delete"]; "active" -> "pending_delete" [label="delayed delete"]; "active" -> "deleted" [label="delete"]; "active" -> "deactivated" [label="deactivate"]; "deactivated" -> "active" [label="reactivate"]; "deactivated" -> "deleted" [label="delete"]; "killed" -> "deleted" [label="delete"]; "pending_delete" -> "deleted" [label="after scrub time"]; } glance-12.0.0/doc/source/images_src/architecture.graphml0000664000567000056710000016020712701407047024413 0ustar jenkinsjenkins00000000000000 Keystone Folder 2 API Glance Folder 3 REST API Glance DB Database Abstraction Layer Glance Domain Controller Auth Notifier Policy Quota Location DB AuthZ Middleware Registry Layer Glance Store Folder 4 Glance Store Drivers AuthN Supported Storages Folder 5 Swift S3 Ceph Sheepdog ... 
Filesystem A client Folder 7 AuthN <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="40px" height="48px" viewBox="0 0 40 48" enable-background="new 0 0 40 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="655.0938" x2="409.4502" y2="655.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,37.613C8.787,37.613,0,35.738,0,33.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,35.738,30.464,37.613,19.625,37.613z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="649.0938" x2="409.4502" y2="649.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path 
fill="url(#SVGID_2_)" d="M19.625,37.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,35.738,8.787,37.613,19.625,37.613z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="646" x2="408.2217" y2="646" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_3_)" cx="19.625" cy="31.425" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="641.0938" x2="409.4502" y2="641.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.613C8.787,23.613,0,21.738,0,19.425v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.738,30.464,23.613,19.625,23.613z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="635.0938" x2="409.4502" y2="635.0938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" 
style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,23.613c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.738,8.787,23.613,19.625,23.613z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="632" x2="408.2217" y2="632" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_6_)" cx="19.625" cy="17.426" rx="18.396" ry="3.926"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="627.5938" x2="409.4502" y2="627.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#4D4D4D"/> <stop offset="0.0558" style="stop-color:#5F5F5F"/> <stop offset="0.2103" style="stop-color:#8D8D8D"/> <stop offset="0.3479" style="stop-color:#AEAEAE"/> <stop offset="0.4623" style="stop-color:#C2C2C2"/> <stop offset="0.5394" style="stop-color:#C9C9C9"/> <stop offset="0.6247" style="stop-color:#C5C5C5"/> <stop offset="0.7072" style="stop-color:#BABABA"/> <stop offset="0.7885" style="stop-color:#A6A6A6"/> <stop offset="0.869" style="stop-color:#8B8B8B"/> <stop offset="0.9484" style="stop-color:#686868"/> <stop offset="1" style="stop-color:#4D4D4D"/> </linearGradient> <path fill="url(#SVGID_7_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="370.2002" y1="621.5938" x2="409.4502" y2="621.5938" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" 
style="stop-color:#B3B3B3"/> <stop offset="0.0171" style="stop-color:#B6B6B6"/> <stop offset="0.235" style="stop-color:#D7D7D7"/> <stop offset="0.4168" style="stop-color:#EBEBEB"/> <stop offset="0.5394" style="stop-color:#F2F2F2"/> <stop offset="0.6579" style="stop-color:#EEEEEE"/> <stop offset="0.7724" style="stop-color:#E3E3E3"/> <stop offset="0.8853" style="stop-color:#CFCFCF"/> <stop offset="0.9965" style="stop-color:#B4B4B4"/> <stop offset="1" style="stop-color:#B3B3B3"/> </linearGradient> <path fill="url(#SVGID_8_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_9_" gradientUnits="userSpaceOnUse" x1="371.4297" y1="618.5" x2="408.2217" y2="618.5" gradientTransform="matrix(1 0 0 1 -370.2002 -614.5742)"> <stop offset="0" style="stop-color:#C9C9C9"/> <stop offset="1" style="stop-color:#808080"/> </linearGradient> <ellipse fill="url(#SVGID_9_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.291,46.792c0,0-4.313,0.578-7.249,0.694 C20.917,47.613,15,47.613,15,47.613l-2.443-10.279l-0.119-2.283l-1.231-1.842L9.789,23.024l-0.082-0.119L9.3,20.715l-1.45-1.44 L5.329,8.793c0,0,5.296,0.882,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.644l-0.375,1.875 l1.627,2.193L31.291,46.792z"/> </svg> <?xml version="1.0" encoding="utf-8"?> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="41px" height="48px" viewBox="-0.875 -0.887 41 48" enable-background="new -0.875 -0.887 41 48" xml:space="preserve"> <defs> </defs> <linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-979.1445" x2="682.0508" y2="-979.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" 
style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_1_)" d="M19.625,36.763C8.787,36.763,0,34.888,0,32.575v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,34.888,30.464,36.763,19.625,36.763z"/> <linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-973.1445" x2="682.0508" y2="-973.1445" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_2_)" d="M19.625,36.763c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.927-18.396,3.927 c-9.481,0-17.396-1.959-18.396-3.927l-1.229,2C0,34.888,8.787,36.763,19.625,36.763z"/> <path fill="#3C89C9" d="M19.625,26.468c10.16,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.554,5.438 c-12.125,0-18.467-2.484-19.541-4.918C-0.127,29.125,9.465,26.468,19.625,26.468z"/> <linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-965.6948" x2="682.0508" y2="-965.6948" gradientTransform="matrix(1 0 0 -1 
-642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_3_)" d="M19.625,23.313C8.787,23.313,0,21.438,0,19.125v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,21.438,30.464,23.313,19.625,23.313z"/> <linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-959.6948" x2="682.0508" y2="-959.6948" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_4_)" d="M19.625,23.313c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926l-1.229,2C0,21.438,8.787,23.313,19.625,23.313z"/> <path fill="#3C89C9" d="M19.476,13.019c10.161,0,19.625,2.775,19.625,2.775c-0.375,2.721-5.367,5.438-19.555,5.438 c-12.125,0-18.467-2.485-19.541-4.918C-0.277,15.674,9.316,13.019,19.476,13.019z"/> <linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" 
x1="642.8008" y1="-952.4946" x2="682.0508" y2="-952.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#3C89C9"/> <stop offset="0.1482" style="stop-color:#60A6DD"/> <stop offset="0.3113" style="stop-color:#81C1F0"/> <stop offset="0.4476" style="stop-color:#95D1FB"/> <stop offset="0.5394" style="stop-color:#9CD7FF"/> <stop offset="0.636" style="stop-color:#98D4FD"/> <stop offset="0.7293" style="stop-color:#8DCAF6"/> <stop offset="0.8214" style="stop-color:#79BBEB"/> <stop offset="0.912" style="stop-color:#5EA5DC"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_5_)" d="M19.625,10.113C8.787,10.113,0,8.238,0,5.925v10c0,2.313,8.787,4.188,19.625,4.188 c10.839,0,19.625-1.875,19.625-4.188v-10C39.25,8.238,30.464,10.113,19.625,10.113z"/> <linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="642.8008" y1="-946.4946" x2="682.0508" y2="-946.4946" gradientTransform="matrix(1 0 0 -1 -642.8008 -939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="0.0039" style="stop-color:#9DD7FF"/> <stop offset="0.2273" style="stop-color:#BDE5FF"/> <stop offset="0.4138" style="stop-color:#D1EEFF"/> <stop offset="0.5394" style="stop-color:#D9F1FF"/> <stop offset="0.6155" style="stop-color:#D5EFFE"/> <stop offset="0.6891" style="stop-color:#C9E7FA"/> <stop offset="0.7617" style="stop-color:#B6DAF3"/> <stop offset="0.8337" style="stop-color:#9AC8EA"/> <stop offset="0.9052" style="stop-color:#77B0DD"/> <stop offset="0.9754" style="stop-color:#4D94CF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <path fill="url(#SVGID_6_)" d="M19.625,10.113c10.839,0,19.625-1.875,19.625-4.188l-1.229-2c0,2.168-8.235,3.926-18.396,3.926 c-9.481,0-17.396-1.959-18.396-3.926L0,5.925C0,8.238,8.787,10.113,19.625,10.113z"/> <linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="644.0293" y1="-943.4014" x2="680.8223" y2="-943.4014" gradientTransform="matrix(1 0 0 -1 -642.8008 
-939.4756)"> <stop offset="0" style="stop-color:#9CD7FF"/> <stop offset="1" style="stop-color:#3C89C9"/> </linearGradient> <ellipse fill="url(#SVGID_7_)" cx="19.625" cy="3.926" rx="18.396" ry="3.926"/> <path opacity="0.24" fill="#FFFFFF" enable-background="new " d="M31.04,45.982c0,0-4.354,0.664-7.29,0.781 c-3.125,0.125-8.952,0-8.952,0l-2.384-10.292l0.044-2.108l-1.251-1.154L9.789,23.024l-0.082-0.119L9.5,20.529l-1.65-1.254 L5.329,8.793c0,0,4.213,0.903,7.234,1.07s8.375,0.25,8.375,0.25l3,9.875l-0.25,1.313l1.063,2.168l2.312,9.645l-0.521,1.416 l1.46,1.834L31.04,45.982z"/> </svg> glance-12.0.0/LICENSE0000664000567000056710000002363712701407047015206 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. glance-12.0.0/run_tests.sh0000775000567000056710000001711112701407047016554 0ustar jenkinsjenkins00000000000000#!/bin/bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Glance's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." 
echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo " --concurrency How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count" echo " Default: 0" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; --concurrency) (( i++ )) concurrency=${!i} ;; -*) testropts="$testropts ${!i}";; *) testrargs="$testrargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testrargs= testropts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 update=0 concurrency=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then # Default to running all tests if specific test is not # provided. testrargs="discover ./glance/tests" fi ${wrapper} python -m testtools.run $testropts $testrargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? 
fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS" fi # Just run the test suites in current environment set +e testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'" if [ setup.cfg -nt glance.egg-info/entry_points.txt ] then ${wrapper} python setup.py egg_info fi echo "Running \`${wrapper} $TESTRTESTS\`" if ${wrapper} which subunit-2to1 2>&1 > /dev/null then # subunit-2to1 is present, testr subunit stream should be in version 2 # format. Convert to version one before colorizing. bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py" else bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py" fi RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" ${wrapper} coverage combine ${wrapper} coverage html --include='glance/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv bash -c "${wrapper} flake8" } TESTRTESTS="lockutils-wrapper python setup.py testr" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." 
python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $just_pep8_changed -eq 1 ]; then # NOTE(gilliard) We want use flake8 to check the entirety of every file that has # a change in it. Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. files=$(git diff --name-only HEAD~1 | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff" exit fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testropts), which begin with a '-', and # arguments (testrargs). 
if [ -z "$testrargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi glance-12.0.0/ChangeLog0000664000567000056710000047412512701407203015747 0ustar jenkinsjenkins00000000000000CHANGES ======= 12.0.0 ------ * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Update .gitreview for stable/mitaka 12.0.0.0rc1 ----------- * revert warnerrors before gate breaks * Imported Translations from Zanata * Catch exceptions.HasSnapshot() from delete image in rbd driver * Imported Translations from Zanata * register the config generator default hook with the right name * Reject bodies for metadef commands * Remove unused enable_v3_api config option * glance-manage db purge failure for limit * Imported Translations from Zanata * Remove state transition from active to queued * Imported Translations from Zanata * Updated the wording in the database architecture docs * Test tag against schema to check length * Update the config files * Imported Translations from Zanata * Adds virtual_size to notifications * Update configuring of Cinder store * Add debug testenv in tox * Fix levels of Swift configuration documentation * no module docs generated * Deprecate use_user_token parameter * Creating or updating a image member in a list causes 500 * Updated from global requirements * Updating comment in tests/unit/test_migrations.py * Fix update all props when you delete image 12.0.0.0b3 ---------- * Fix location update * Moved CORS middleware configuration into oslo-config-generator * Use assertGreater/Less/Equal instead of assertTrue(A * B) * New metadata definitions from CIM * Add support for DSA signatures * Fix message formatting in glance-manage purge * Updated from global requirements * Remove unused pngmath sphinx extension * Do not use constraints for venv * Fix BaseException.message deprecation warnings * Remove py33 
from tox envlist * Resolve i18n and Sphinx issues in signature_utils * Add support for ECC signatures * Return 204 rather than 403 when no image data * Move bandit into pep8 * Updated from global requirements * Support importing OVA/OVF package to Glance * Always use constraints * Updated from global requirements * Include registry_client_* options in glance-scrubber.conf * Python 3: fix a few simple "str vs bytes" issues * remove redundant "#!/usr/bin/env python" header * Encourage usage of identity API v3 * Python 3: fix glance.tests.functional.db.simple * Reuse encodeutils.to_utf8() * Fix OpenSSL DeprecationWarning on Python 3 * Added support new v2 API image filters * Add sign-the-data signature verification * Stop gridfs driver support * Updated from global requirements * Set self and schema to readOnly * Make sure the generated glance-api.conf.sample is always the same * Add unit test for default number of workers * Replace assertRaisesRegexp with assertRaisesRegex * Reuse jsonutils.dump_as_bytes() * Do not log sensitive data * Cache documentation about differences in files * Tolerate installation of pycryptodome * grammar correction in basic architecture file * Promote log message to exception level on artifact load failure * Allow mutable argument to be passed to BinaryObject artifacts * Include version number into glare factory path in paste * Fix 500 status code when we add in "depend_on" yourself * Unallowed request PATCH when work with blob * Use keystoneclient functions to receive endpoint * Drop python 2.6 support * Move Glance Artifact Repository API to separate endpoint * Imported Translations from Zanata * Imported Translations from Zanata * clean up auto-generated docs for configuration options * Update the home page * Updated from global requirements * Misspelling in message * v2 - "readOnly" key should be used in schemas * Prevent user to remove last location of the image * Fix _wait_on_task_execution() * Updating message for conversion_format 
cfg_opt * Fix setup.cfg * Replace exit() by sys.exit() * Change Metadefs OS::Nova::Instance to OS::Nova::Server * Change exception format checks in artifact tests * Imported Translations from Zanata * Remove glance_store specific unit tests * Encode headers to launch glance v2 on mod_wsgi * Make the task's API admin only by default * No need to have async executor fetching be a contextmanager * Updated from global requirements * Python 3: fix glance.tests.unit * Add storage_policy VMware driver option for flavors * Remove unneeded glance unit test assert calls * utils: remove PrettyTable custom class in favor of the eponym libary * Hacking checks for not using dict iteration calls * Add note in comment where upstream taskflow change is * Fix for Image members not generating notifications * Updated from global requirements * Generate page of all config options in docs * Use oslo.utils exception encoding util * Add hacking check to ensure not use xrange() * Updated from global requirements * Fix help command in cache manange and replicator * Add properties_target to Instance SW metadefs * Simplify taskflow engine loading * Allow image-list if access to attrs is forbidden * [docs] Add Domain model implementation sub-section * Drop dict.iterkeys() for python3 * Fix re-adding deleted members to an image in v1 * Replace xrange() with six.moves.range() 12.0.0.0b2 ---------- * Add metadefs for Cinder volume type configuration * Python3: Replace dict.iteritems with dict.items * Enhance description of instance-uuid option for image-create * Make cache config options clearer * Imported Translations from Zanata * Update links for CLI Reference * Python3: fix operations of dict keys() * Implement trust support for api v2 * Imported Translations from Zanata * Fix the wrong options in glance-api and glance-registry confs * Do not use api-paste.ini osprofiler options * Update the cache documentation * Updated from global requirements * Catch UnsupportedAlgorithm exceptions * Add 
functionality to define requests without body * Updated from global requirements * Use six.moves.reduce instead of builtin reduce * Fixing the deprecated library function * Remove Indices and tables section * Remove unused logging import * Fix Glance doesn't catches UnicodeDecodeError exception * Updated from global requirements * assertIsNone(val) instead of assertEqual(None,val) * Fix glance doesn't catches exception NotFound from glance_store * Deprecated tox -downloadcache option removed * Wait all wsgi server completion for worker exit * Fix model sync for SQLite * Update the cache middleware flavor guideline * Add sign-the-hash deprecation warning * Add db purge command * Replace oslo_utils.timeutils * Add missing CPU features to Glance Metadata Catalog * Updated from global requirements * Remove iso8601 dependency * Assert problems in Glance raised by Bandit * Import i18n functions directly * Validate empty location value for v1 api * Updated from global requirements * Added CORS support to Glance * Capitalize 'glance' in db.rst * Stop using tearDown in v1/test_api.py * Fix return 200 status code when we operate with nonexistent property * Fix default value with postgreSQL * Rename glance-store to glance_store * Run py34 env first when launching tests * Move store config opt to glance_store section * Remove artifact entry point * Remove version from setup.cfg * Add the Docker container format * Change the format of some inconsistent docstring 12.0.0.0b1 ---------- * Updated from global requirements * Automated bandit checks in glance * Port _validate_time() to Python 3 * Updated from global requirements * Support Unicode request_id on Python 3 * Unicode fix in BaseClient._do_request() on py3 * Fix incorrect task status with wrong parameter * Document contribution guidelines * Updated from global requirements * Fix glance.tests.unit.v1.test_registry_client * Fix sample Rally plugin * force releasenotes warnings to be treated as errors * V1: Fix bad dates 
returning 500 * Fix 500 error when filtering with specified invalid operator * Fix 500 error when filtering by 'created_at' and 'updated_at' * Update os.path.remove as it does not exist * Change the default notification exchange to glance * Add documentation for configuring disk_formats * V1: Stop id changes after the image creation * Format log messages correctly * [docs] Update description of Glance-Swift conf options * Disallow user modifing ACTIVE_IMMUTABLE of deactivated images * [docs] Update Glance architecture image * test: make enforce_type=True in CONF.set_override * OpenStack typo * Support new v2 API image filters * Remove anyjson useless requirement * Python3: fix glance.tests.unit.v2.test_registry_client * Location add catch bad Uri * [docs] delete duplicated image_status_transition.png * Reactivating admin public image returns 500 * Python3: fix glance.tests.unit.test_migrations * Python3: fix test_image_data_resource * Remove todo to remove /versions * Python3: fix test_registry_api * Updated from global requirements * Fix typos in configuring.rst * Python3: fix glance.tests.unit.v2.test_images_resource * add "unreleased" release notes page * Python 3: Fix glance.tests.unit.v2.test_tasks_resource * Python 3: fix test_image_members_resource * Remove default=None for config options * Update style for signature_utils class * Add -constraints for CI jobs * Add a deprecation warning to the DB downgrade * Remove unused exceptions from glance * Add tasks info to glance documentation * Add reno for release notes management * Add subunit.log to gitignore * Updated from global requirements * Fix content type for Forbidden exception * Port v1.test_registry_api to Python 3 * Remove requests to example.com during unit testing * Port signature_utils to Python 3 * Imported Translations from Zanata * Rename semantic-version dep to semantic_version * Port script utils to Python 3 * Use dict comprehension * Typo fix * Updated from global requirements * Port 
test_cache_manage to Python 3 * Port test_wsgi to Python 3 * Updated from global requirements * Fix incorrect Glance image metadata description * Rename glance-store dep to glance_store * Remove glance_store from exta requirements * Port async tests to Python 3 * Fixed registry invalid token exception handling * Updated from global requirements * Add more tests which pass on Python 3 * Show the file name when there is an error loading an image metadef file * Remove the duplicate file path created by sphinx build * [docs] Adds new image status - deactivated * Cause forbidden when deactivating image(non-admin) * Updated from global requirements * Don't allow queries with 'IN' predicate with an empty sequence * utils: use oslo_utils.uuidutils * utils: remove unused functions in glance.utils * Bodies that are not dicts or lists return 400 * Pass CONF to logging setup * Fix 500 error when filtering by invalid version string * Fix error when downloading image status is not active 11.0.0 ------ * Add 'deactivated' status to image schema * Allow owner to be set on image create * Decrease test failure if second changes during run * config: remove default lockutils set * Catch InvalidImageStatusTransition error * Port rpc and wsgi to Python 3 * Refactoring exceptions * Fix glance ignored a headers when created artifact * Add ability to specify headers in PUT/PATCH request in functional tests * Fix 500 error when we specify invalid headers when work with blob/bloblist * fix a typo in show_multiple_locations help message * Updated from global requirements * Add testresources and testscenarios used by oslo.db fixture * Add testresources and testscenarios used by oslo.db fixture * Add 'deactivated' status to image schema * Fix the bug of "Error spelling of a word" * Imported Translations from Zanata * Fix 409 response when updating an image by removing read-only property 11.0.0.0rc2 ----------- * Imported Translations from Zanata * Updated from global requirements * Port api 
test_common to Python 3 * An explicit check for the presence of a property * Cleanup chunks for deleted image if token expired * Download forbidden when get_image_location is set * Download forbidden when get_image_location is set * tell pbr to tell sphinx to treat warnings as errors * add placeholder to ensure _static directory exists * add the man pages to the toctree * escape underline introducing a spurrious link reference * do not indent include directives * add missing document to toctree * fix restructuredtext formatting errors * Catch NotAuthenticated exception in import task * Cleanup chunks for deleted image if token expired * Catch NotAuthenticated exception in import task * Imported Translations from Zanata * Return missing authtoken options * Change string generation mechanism for info logging * Add Large pages meta definition * Return missing authtoken options * Fix mutable defaults in tests * Imported Translations from Zanata 11.0.0.0rc1 ----------- * Open Mitaka development * Cleanup of Translations * Remove redundant requirements.txt from tox * Add swiftclient to test-requirements * Updated from global requirements * Update Glance example configs to reflect Liberty * Imported Translations from Zanata * Fix server start ping timeout for functional tests * Prevent image status being directly modified via v1 * Fixed the output of list artifacts API calls * Change ignore-errors to ignore_errors * Prevent extraneous log messages and stdout prints * [Glance Developer Guide] Grammar edits * utils: stop building useless closure * Remove `openstack' directory * Imported Translations from Zanata * Fixes the possibility of leaving orphaned data * Add missing function '_validate_limit' * Fix wrong parameters order in Task * Remove WARN log message from version_negotiation * Fix order of arguments in assertEqual * Scrub images in parallel * Make task_time_to_live work * Incorrect permissions on database migration file * Add _member_ to 
property-protections-roles.conf.sample * Domain model section * Add unit tests for signature_utils class * Scrubber to communicate with trustedauth registry * Corrected hyperlink in metadefs documentation * Remove pointless tests comparing opts against list * Remove old traces of the oslo-incubator * Updated from global requirements * Use oslo utils to encode exception messages * clean up requirements 11.0.0.0b3 ---------- * Disable v3 API by default * Glance metadef tables need unique constraints * Add image signing verification * Don't return 300 when requesting /versions * Updated from global requirements * Use min and max on IntOpt option types * Fixed non-owner write-access to artifacts * Remove WritableLogger from wsgi server * Allow to filter artifacts by range * Fixed version unequality artifact filtering * Artifacts are now properly filtered by dict props * Fixed an HTTP 500 on artifact blob upload * Port rally scenario plugin to new Rally framework * Use stevedore directive to document plugins * Catch update to a non-existent artifact property * Fix spelling mistake in test_images.py * Change URL to End User Guide * Fix URLs to admin-guide-cloud * reuse the deleted image-member before create a new image-member * Imported Translations from Transifex * Add CPU Pinning in metadata definitions * Fix image owner can't be changed issue in v2 * Port common.utils to Python 3 * Port store image to Python 3 * Port replicator to Python 3 * Port glance.db to Python 3 * Port image cache to Python 3 * Fix Python 3 issues in glance.tests.unit.common * Don't use slashes for long lines - use parentheses instead * Updated from global requirements * Imported Translations from Transifex * Don't import files with backed files * Use oslo_config PortOpt support * Setting default max_request_id_length to 64 * Add mechanism to limit Request ID size * return request_id in case of 500 error * Remove no longer used parameter (FEATURE_BLACKLIST) * Fixed few typos * Correct the 
indentation on a few functions * Use dictionary literal for dictionary creation * List creation could be rewritten as a list literal * Remove duplicate name attribute * Incorrect variable name is declared * Fix Request ID has a double 'req-' at the start * Fix few typos in glance * Updated from global requirements * Fix 501 error when accessing the server with a non-existent method * Imported Translations from Transifex * Fix existing migrations to create utf-8 tables for MySQL DB * Remove Catalog Index Service * Fix error message's format in image_member * Include metadefs files in all packages 11.0.0.0b2 ---------- * Move to using futurist library for taskflow executors * Updated from global requirements * Glance to handle exceptions from glance_store * Keeping the configuration file with convention * Fix Python 3 issues in glance.tests.unit * Allow ramdisk_id, kernel_id to be null on schema * Remove duplicate string * Imported Translations from Transifex * Update glance_store requirement to 0.7.1 * Fix Rally job failure * Make utf8 the default charset for mysql * Use oslo_utils.encodeutils.exception_to_unicode() * Updated from global requirements * Remove H302,H402,H904 * add annotation of param * Adds a rados_connect_timeout description * Fix the document bug in part of digest_algorithm * Purge dead file-backed scrubber queue code * Correct reference to VC as vCenter * Remove usage of assert_called_once in mocks * Rationalize test asserts * Add .eggs/* to .gitignore * Refactoring of image-members v2 API implementation * Improve code readability in functional test for the WSGIServer * Make 'id' a read only property for v2 * Healthcheck Middleware * Updated from global requirements * Functional of the HTTPclient was put in own method * Fix wrong check when create image without data * Remove unneeded OS_TEST_DBAPI_ADMIN_CONNECTION * glance metadef resource-type-associate fails in postgresql * Change default digest_algorithm value to sha256 * Update requirements * 
Remove unused oslo incubator files * Remove unnecessary mixin from artifact domain model * Adds os_admin_user to common OS image prop metadef * Validate size of 'min_ram' and 'min_disk' * Remove unused imported marker functions * Fix duplicate unique constraint in sqlite migration * Fix broken URL to docs.openstack.org * Remove unnecessary executable permission * Fix the db_sync problem in 039 for db2 * Imported Translations from Transifex * Fix OSProfiler exception when is enabled * Add an API call to discover the list of available artifact types 11.0.0.0b1 ---------- * Provide extra parameter for FakeDB * Switch to oslo.service * tests: don't hardcode strace usage * Fix tox -e py34 * Imported Translations from Transifex * Typo fix * Drop use of 'oslo' namespace package * Update version for Liberty 11.0.0a0 -------- * Add client_socket_timeout option * Switch from MySQL-python to PyMySQL * Fix grammar in installation documentation * Use ConfigFixture to ensure config settings are reverted * Change status code from 500 to 400 for image update request * Added test for "delete image member for public image" * Pass environment variables of proxy to tox * Add info how to avoid issues with token expiration * Fix Python 3 issues * Cleanup TODO in glance/gateway.py for elasticsearch being unavailable * Fix DbError when image params are out of range * REST API layer for Artifact Repository * Remove duplicate creation of use_user_token * Correct bad documentation merge * Sync with latest oslo-incubator * Fix HTTP 500 on NotAuthenticated in registry (v2) * Domain layer for Artifact Repository * Refactoring registry tests for v2 * Return empty str for permissive, none, properties * Fix typo in the code * Fixed error message for negative values of min_disk and min_ram * Changes in rally-jobs/README.rst * Make create task as non-blocking * Mark task as failed in case of flow failure * Add VMDK as a conversion format to convert flow * Make properties roles check case-insensitive 
* Imported Translations from Transifex * Change generic NotFound to ImageNotFound exception * Remove is_public from domain layer * Leverage dict comprehension in PEP-0274 * Fix Server.start() on Python 3.4 * Use six.moves to fix imports on Python 3 * Imported Translations from Transifex * Bug : tox -egenconfig failure (no glance-search.conf) * Replace types.NameType with name * Fix test_opts to not resolve requirements * Fix logging task id when task fails * Fix typo in documentation * rpc: remove wrong default value in allowed exceptions * rpc: clean JSON serializer, remove strtime() usage * Set filesystem_store_datadir in tests * Taskflow engine mode should be parallel in sample conf * VMware: vmware_ostype should be enum * VMware: add VirtualVmxnet3 to hw_vif_model * Fixed glance.tests.unit.test_artifacts_plugin_loader unit-test * Fix delayed activation without disk and containers formats * Save image data after setting the data * Make sure the converted image is imported * Updated from global requirements * Imported Translations from Transifex * Register oslo.log's config options in tests * Remove string formatting from policy logging * Remove unneeded setup hook from setup.cfg * Drop use of 'oslo' namespace package 2015.1.0 -------- * Metadef JSON files need to be updated * Plugin types are not exposed to the client * v1 API should be in SUPPORTED status * Read tag name instead of ID * v1 API should be in SUPPORTED status * API calls to Registry now maintain Request IDs * Updated from global requirements * Remove ordereddict from requirements * Release Import of Translations from Transifex * Glance database architecture section * update .gitreview for stable/kilo * Plugin types are not exposed to the client * Revert "Reduce DB calls when getting an image" * Read tag name instead of ID * Metadef JSON files need to be updated * Fix wrong docstring by copy-paste * Add logging when policies forbid an action * Remove non-ascii characters in 
glance/doc/source/architecture.rst * Fix typos in glance/doc/source/configuring.rst * Correct text in error response 2015.1.0rc1 ----------- * Fixes glance-manage exporting meta definitions issue * Catch UnknownScheme exception * Refactor API function test class * Move elasticsearch dep to test-requirements.txt * Update openstack-common reference in openstack/common/README * glance-manage output when ran without any arguments * Reduce DB calls when getting an image * Open Liberty development * Zero downtime config reload (glance-control) * Imported Translations from Transifex * Glance cache to not prune newly cached images * glance-manage db load_metadefs does not load all resource_type_associations * Fix intermittent unit test failures * Fix intermittent test case failure due to dict order * Imported Translations from Transifex * A mixin for jsonpatch requests validation * Artifact Plugins Loader * Declarative definitions of Artifact Types * Creating metadef object without any properties * Zero downtime config reload (log handling) * Database layer for Artifact Repository * Catalog Index Service - Index Update * Catalog Index Service * Zero downtime config reload (socket handling) * Typo in pylintrc file * Fix metadef tags migrations * Update documentation for glance-manage * Fix common misspellings 2015.1.0b3 ---------- * Replace assert statements with proper control-flow * Remove use of contextlib.nested * Use graduated oslo.policy * oslo: migrate namespace-less import paths * Fix typo in rpc controller * Fixes typo in doc-string * wsgi: clean JSON serializer * Remove scrubber cleanup logic * use is_valid_port from oslo.utils * Add ability to deactivate an image * Remove deprecated option db_enforce_mysql_charset * Raise exception if store location URL not found * Fix missing translations for error and info * Basic support for image conversion * Extend images api v2 with new sorting syntax * Add the ability to specify the sort dir for each key * Move to 
graduated oslo.log module * Provide a way to upgrade metadata definitions * Pass a real image target to the policy enforcer * Glance basic architecture section * Fix typo in configuration file * Updated from global requirements * Add sync check for models_metadef * Notifications for metadefinition resources * Update config and docs for multiple datastores support * Avoid usability regression when generating config * Glance Image Introspection * Add capabilities to storage driver * Updated from global requirements * Zero downtime configuration reload * Add operators to provide multivalue support * Remove the eventlet executor * SemVer utility to store object versions in DB * Switch to latest oslo-incubator * Use oslo_config choices support * Fix the wrong format in the example * Remove en_US translation * Git ignore covhtml directory * db_export_metadefs generates inappropriate json files * Synchronising oslo-incubator service module * Unify using six.moves.range rename everywhere * Updated from global requirements * Glance returns HTTP 500 for image download * Remove boto from requirements.txt * Unbreak python-swiftclient gate * Eventlet green threads not released back to pool * Imported Translations from Transifex * Removes unnecessary assert * Prevents swap files from being found by Git * Add BadStoreConfiguration handling to glance-api * Remove redundant parentheses in conditional statements * Make sure the parameter has the consistent meaning * Image data remains in backend for deleted image * Remove is_public from reserved attribute in v2 * unify some messages * Typos fixed in the comments * The metadef tags create api does not match blue-print * Clarified doc of public_endpoint config option * Add detail description of image_cache_max_size * Updated from global requirements 2015.1.0b2 ---------- * Add Support for TaskFlow Executor * Include readonly flag in metadef API * Fix for CooperativeReader to process read length * Software Metadata Definitions * 
Updated from global requirements * Rewrite SSL tests * Replace snet config with endpoint config * Simplify context by using oslo.context * Handle empty request body with chunked encoding * Update vmware_adaptertype metadef values * Typos fixed in the comments * Updated from global requirements * Redundant __init__ def in api.authorization.MetadefTagProxy * Make digest algorithm configurable * Switch to mox3 * Remove argparse from requirement * Remove optparse from glance-replicator * Eliminate shell param from subprocesses in tests * Remove test dependency on curl * Cleanup chunks for deleted image that was 'saving' * remove need for netaddr * Fix copy-from when user_storage_quota is enabled * remove extraneous --concurrency line in tox * SQL scripts should not manage transactions * Fixes line continuations * Upgrade to hacking 0.10 * Removed python-cinderclient from requirements.txt * Move from oslo.db to oslo_db * Move from oslo.config to oslo_config * Improve documentation for glance_stores * Fix reference to "stores" from deprecated name * Move from oslo.utils to oslo_utils * Updated from global requirements * Updated from global requirements * Prevent file, swift+config and filesystem schemes * Simplify usage of str.startswith * Adding filesystem schema check in async task * Fix spelling typo * Fix rendering of readme document * Imported Translations from Transifex * Add swift_store_cacert to config files and docs * Add latest swift options in glance-cache.conf * Fix document issue of image recover status * rename oslo.concurrency to oslo_concurrency * Provide a quick way to run flake8 * Fix 3 intermittently failing tests * Removed obsolete db_auto_create configuration option * Fix client side i18n support for v1 api * Move default_store option in glance-api.conf * Removes http-requests to glance/example.com in glance test * Remove _i18n from openstack-common * Adds the ability to sort images with multiple keys * Add sort key validation in v2 api * Fixes typo: 
glance exception additional dot * Allow $OS_AUTH_URL environment variable to override config file value * Bump API version to 2.3 * Replace '_' with '_LI', '_LE', '_LW', '_LC' 2015.1.0b1 ---------- * Removes unused modules: timeutils and importutils * Generate glance-manage.conf * Imported Translations from Transifex * Adding Metadef Tag support * Removed unnecessary dot(.) from log message * Using oslo.concurrency lib * Update config and docs for Multiple Containers * To prevent client use v2 patch api to handle file and swift location * Updated from global requirements * Use testr directly from tox * Remove reliance on import order of oslo.db mods * Remove openstack.common.gettextutils module * Fix typo in common module * Fix and add a test case for IPv6 * Start server message changed * Fix getaddrinfo if dnspython is installed * Workflow documentation is now in infra-manual * Allow None values to be returned from the API * Expose nullable fields properties * Allow some fields to be None * Update glance.openstack.common.policy and cleanup * A small refactoring of the domain * Updated from global requirements * Disable osprofiler by default * Work toward Python 3.4 support and testing * Correct GlanceStoreException to provide valid message - Glance * Remove Python 2.6 classifier * Add ModelSMigrationSync classes * Alter models and add migration * No 4 byte unicode allowed in image parameters * Update rally-jobs files * Move from using _ builtin to using glance.i18n _ * Change Glance to use i18n instead of gettextutils * Raising glance logging levels * Imported Translations from Transifex * Do not use LazyPluggable * metadef modules should only use - from wsme.rest import json * Wrong order of assertEquals args(Glance) * Removal of unnecessary sample file from repository * Upgrade tests' mocks to match glance_store * Remove exception declarations from replicator.py * Typo correction of the prefix value in compute-host-capabilities * Replace custom lazy loading by 
stevedore * vim ropeproject directories added to gitignore * Initiate deletion of image files if the import was interrupted * Raise an exception when quota config parameter is broken * Fix context storage bug * Ignore Eric IDE files and folders in git * Make RequestContext use auth_token (not auth_tok) * Swift Multi-tenant store: Pass context on upload * Use unicode for error message * change default value for s3_store_host * remove url-path from the default value of s3_store_host * Complete the change of adding public_endpoint option * Update the vmware_disktype metadefs values * Add config option to override url for versions * Separate glance and eventlet wsgi logging * Remove openstack.common.test * Remove modules from openstack-common.conf * Improve error log for expired image location url * Handle some exceptions of image_create v2 api * Remove eventlet_hub option * Adds openSUSE in the installing documentation * Glance scrubber should page thru images from registry * Add logging to image_members and image_tags * Update glance.openstack.common 2014.2 ------ * Fix options and their groups - etc/glance-api.conf * Fix options and their groups - etc/glance-api.conf * Adjust authentication.rst doc to reference "identity_uri" * Can not delete images if db deadlock occurs * Reduce extraneous test output * Isolate test from environment variables * Fix for adopt glance.store library in Glance * Adjust authentication.rst doc to reference "identity_uri" 2014.2.rc2 ---------- * Use identity_uri instead of older fragments * Prevent setting swift+config locations * Metadef schema column name is a reserved word in MySQL * Remove stale chunks when failed to update image to registry * GET property which name includes resource type prefix * g-api raises 500 error while uploading image * Fix for Adopt glance.store library in Glance * Update Metadefs associated with ImagePropertiesFilter * updated translations * Use ID for namespace generated by DB * Metadef Property and Object 
schema columns should use JSONEncodedDict * Add missing metadefs for shutdown behavior * Update driver metadata definitions to Juno * Mark custom properties in image schema as non-base * Specify the MetadefNamespace.namespace column is not nullable * Make compute-trust.json compatible with TrustFilter * Include Metadata Defs Concepts in Dev Docs * Nova instance config drive Metadata Definition * Add missing metadefs for Aggregate Filters * Updated from global requirements 2014.2.rc1 ---------- * Imported Translations from Transifex * Add specific docs build option to tox * Add documentation for a new storage file permissions option * Updated from global requirements * Remove db_enforce_mysql_charset option for db_sync of glance-manage * Fix assertEqual arguments order * Prevent setting swift+config locations * Remove stale chunks when failed to update image to registry * Use specific exceptions instead of the general MetadefRecordNotFound * Metadef schema column name is a reserved word in MySQL * Fix for Adopt glance.store library in Glance * GET property which name includes resource type prefix * Incorrect parameters passed * g-api raises 500 error while uploading image * Minor style tidy up in metadata code * Metadef Property and Object schema columns should use JSONEncodedDict * Updated from global requirements * Use ID for namespace generated by DB * Switch to oslo.serialization * Switch to oslo.utils * Imported Translations from Transifex * Add missing metadefs for shutdown behavior * hacking: upgrade to 0.9.x serie * Fix bad header bug in glance-replicator * Run tests with default concurrency 0 * Refactor test_migrations module * Include Metadata Defs Concepts in Dev Docs * Open Kilo development * Mark custom properties in image schema as non-base * Fix missing space in user_storage_quota help message * Fix glance V2 incorrectly implements JSON Patch'add' * Make compute-trust.json compatible with TrustFilter * replace dict.iteritems() with six.iteritems(dict) 
* Enforce using six.text_type() over unicode() * Update driver metadata definitions to Juno * Remove uses of unicode() builtin * Fixes Error Calling GET on V1 Registry * Enabling separated sample config file generation * Update Metadefs associated with ImagePropertiesFilter * Fixes logging in image_import's main module * Refactor metadef ORM classes to use to_dict instead of as_dict * Stop using intersphinx * Just call register_opts in tests * Replaces assertEqual with assertTrue and assertFalse * Block sqlalchemy-migrate 0.9.2 * Specify the MetadefNamespace.namespace column is not nullable * Add missing metadefs for Aggregate Filters * Nova instance config drive Metadata Definition * Improve OS::Compute::HostCapabilities description * Sync glance docs with metadefs api changes * Change open(file) to with block * Fix CommonImageProperties missing ":" * Fix VMware Namespace capitalization & description * Imported Translations from Transifex * Duplicated image id return 409 instead of 500 in API v2 * Glance API V2 can't recognize parameter 'id' * API support for random access to images * Adopt glance.store library in Glance * Adds missing db registry api tests for Tasks * warn against sorting requirements * Introduces eventlet executor for Glance Tasks 2014.2.b3 --------- * Glance Metadata Definitions Catalog - API * ignore .idea folder in glance * Glance Metadata Definitions Catalog - Seed * Glance Metadata Definitions Catalog - DB * Restrict users from downloading protected image * Syncing changes from oslo-incubator policy engine * Use identity_uri instead of older fragments * Fix legacy tests using system policy.json file * Improve Glance profiling * Fix collection order issues and unit test failures * Check on schemes not stores * Replacement mox by mock * Imported Translations from Transifex * Log task ID when the task status changes * Changes HTTP response code for unsupported methods * Enforce image_size_cap on v2 upload * Do not assume order of images * 
Ensure constant order when setting all image tags * Fix bad indentation in glance * Use @mock.patch.object instead of mock.MagicMock * Adding status field to image location -- scrubber queue switching * Bump osprofiler requirement to 0.3.0 * Fix migration on older postgres * Fix rally performance job in glance * Integrate OSprofiler and Glance * Fix image killed after deletion * VMware store: Use the Content-Length if available * Fix RBD store to use READ_CHUNKSIZE * Trivial fix typo: Unavilable to Unavailable * Quota column name 'key' in downgrade script * Do not log password in swift URLs in g-registry * Updated from global requirements * Use `_LW` where appropriate in db/sqla/api * Log upload failed exception trace rather than debug * Decouple read chunk size from write chunk size * Enable F821 check: undefined name 'name' 2014.2.b2 --------- * Security hardening: fix possible shell injection vulnerability * Move to oslo.db * Catch exception.InUseByStore at API layer * Fixes the failure of updating or deleting image empty property * Adding status field to image location -- scrubber changes * Also run v2 functional tests with registry * Refactoring Glance logging lowering levels * Set defaults for amqp in glance-registry.conf * Fix typo in swift store message * Add a `_retry_on_deadlock` decorator * Use auth_token from keystonemiddleware * Allow some property operations when quota exceeded * Raising 400 Bad Request when using "changes-since" filter on v2 * Moving eventlet.hubs.use_hub call up * Adding status field to image location -- domain and APIs changes * Add task functions to v2 registry * Changing replicator to use openstack.common.log * Fix unsaved exception in v1 API controller * Pass Message object to webob exception * Some exceptions raise UnicodeError * Handle session timeout in the VMware store * Some v2 exceptions raise unicodeError * Resolving the performance issue for image listing of v2 API on server * Switch over oslo.i18n * Fix typo in comment 
* Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Raise NotImplementedError instead of NotImplemented * Fix unsaved exception in store.rbd.Store.add() * Fix docstrings in enforce() and check() policy methods * Added an extra parameter to the df command * Add CONTRIBUTING.rst * Imported Translations from Transifex * Use (# of CPUs) glance workers by default * Sync processutils and lockutils from oslo with deps * Document registry 'workers' option * Removing translation from debug messages * Unifies how BadStoreUri gets raised and logged * Fix lazy translation UnicodeErrors * Changing Sheepdog driver to use correct configuration function * Implemented S3 multi-part upload functionality * Log swift container creation * Synced jsonutils and its dependencies from oslo-incubator * Remove user and key from location in swift * Updated from global requirements * Changed psutil dep. to match global requirements * Add pluging sample for glance gate * Fixes v2 return status on unauthorized download * Update documentation surrounding the api and registry servers * Do not call configure several times at startup * Move `location`'s domain code out of glance.store * sync oslo incubator code * notifier: remove notifier_strategy compat support * notifier: simply notifier_strategy compat support * colorizer: use staticmethod rather than classmethod * Improved coverage for glance.api.* * Assign local variable in api.v2.image_data 2014.2.b1 --------- * Use df(1) in a portable way * Add test for no_translate_debug_logs hacking check * Add hacking checks * replace dict.iteritems() with six.iteritems(dict) * make uploading an image as public admin only by default * remove default=None for config options * Bump python-swiftclient version * TaskTest:test_fail() should use asserIstNone * debug level logs should not be translated * use /usr/bin/env python instead of /usr/bin/python * Remove all mostly untranslated PO files * Remove 
duplicated is_uuid_like() function * fixed typos found by RETF rules in RST files * Use safe way through "with" statement to work with files * Clean up openstack-common.conf * Removing duplicate entry from base_conf * Use safe way through "with" statement to work with files * Use Chunked transfer encoding in the VMware store * Ensures that task.message is of type unicode * Replace unicode() for six.text_type * Prevent creation of http images with invalid URIs * Fixed a handful of typos * Fixes installation of test-requirements * Add rally performance gate job for glance * To fixes import error for run_tests.sh * Replace assert* with more suitable asserts in unit tests * Get rid of TaskDetails in favor of TaskStub * Fixes "bad format" in replicator for valid hosts * Sync latest network_utils module from Oslo * Fixes spelling error in test name * Uses None instead of mutables for function param defaults * Fix various Pep8 1.5.4 errors * Fixes Glance Registry V2 client * Update Glance configuration sample files for database options * To prevent remote code injection on Sheepdog store * Added undescore function to some log messages * Adds TaskStub class * Updated from global requirements * user_storage_quota now accepts units with value * Do not allow HEAD images/detail * Configuration doc for VMware storage backend * Catch loading failures if transport_url is not set * Fix Jenkins translation jobs * Fixed the pydev error message 2014.1.rc1 ---------- * Open Juno development * Making DB sanity checking be optional for DB migration * Fix deprecation warning in test_multiprocessing * Do not set Location header on HTTP/OK (200) responses * Fix swift functional test "test_create_store" * Sanitize set passed to jsonutils.dumps() * When re-raising exceptions, use save_and_reraise * Imported Translations from Transifex * Sync common db code from Oslo * Return 405 when attempting DELETE on /tasks * Remove openstack.common.fixture * Enable H304 check * VMware store.add to 
return the image size uploaded * registry: log errors on failure * Removes use of timeutils.set_time_override * Provide explicit image create value for test_image_paginate case * Make the VMware datastore backend more robust * Pass Message object to webob exception * Detect MultiDict when generating json body * Makes possible to enable Registry API v1 and v2 * Do not use __builtin__ in python3 * Updated from global requirements * Fix swift functional test * Provide an upgrade period for enabling stores * API v2: Allow GET on unowned images with show_image_direct_url * Add copyright text to glance/openstack/common/__init__.py * Don't enable all stores by default * Remove unused methods * Fix glance db migration failed on 031 * Document for API message localization 2014.1.b3 --------- * Add support for API message localization * Add the OVA container format * Store URI must start with the expected URI scheme * Documentation for Glance tasks * Remove import specific validation from tasks resource * Remove dependency of test_v1_api on other tests * Include Location header in POST /tasks response * Catch exception when image cache pruning * VMware storage backend should use oslo.vmware * Sync common db code from Oslo * Refactor UUID test * Replaced calls of get(foo, None) -> get(foo) * Use six.StringIO/BytesIO instead of StringIO.StringIO * Replaced "...\'%s\'..." with "...'%s'..." 
* Updated from global requirements * Fix logging context to include user_identity * Log 'image_id' with all BadStoreURI error messages * Added undescore function to some strings * Use 0-based indices for location entries * Glance all: Replace basestring by six for python3 compatability * Delete image metadata after image is deleted * Modify assert statement when comparing with None * Enable hacking H301 and disable H304, H302 * Replacement mox by mock * Keep py3.X compatibility for urllib * Use uuid instead of uuidutils * Use six.moves.urllib.parse instead of urlparse * Switch over to oslosphinx * Fix parsing of AMQP configuration * Add `virtual_size` to Glance's API v2 * Add a virtual_size attribute to the Image model * Enable F841 check * Add support for PartialTask list * Rename Openstack to OpenStack * Add a mailmap entry for myself * Sync log.py from oslo * Add unit tests around glance-manage * Remove tox locale overrides * Improve help strings * Provide explicit image create value in Registry v2 API test * VMware Datastore storage backend * Adding status field to image location -- DB migration * Apply image location selection strategy * Switch to testrepository for running tests * Clean up DatabaseMigrationError * Enable H302 check * Fix misspellings in glance * Expose image property 'owner' in v2 API * Removes logging of location uri * Updated from global requirements * Remove duplicate type defination of v2 images schema * Enable H202 check * Modify my mailmap * glance-manage wont take version into consideration * Move scrubber outside the store package * Depending on python-swiftclient>=1.6 * Now psutil>=1.1.0 is actually on PyPI * Fix indentation errors found by Pep8 1.4.6+ * Add VMware storage backend to location strategy * Log a warning when a create fails due to quota * glance requires pyOpenSSL>=0.11 * Imported Translations from Transifex * Restore image status to 'queued' if upload failed * Don't override transport_url with old configs * Provide 
explicit image create value in Registry v2 Client test * Provide explicit task create and update value in controller tests * Enable hacking H703 check * Sync with global requirements * Sync oslo.messaging version with global-requirements * Don't rewrite the NotFound error message * Update all the glance manpages * Use common db migrations module from Oslo * Check --store parameter validity before _reserve * Sync gettextutils from Oslo * Enable gating on H501 * Add multifilesystem store to support NFS servers as backend * Check first matching rule for protected properties * Retry failed image download from Swift * Restore image status on duplicate image upload 2014.1.b2 --------- * Tests added for glance/cmd/cache_pruner.py * Prevent E500 when delayed delete is enabled * Sync unhandled exception logging change from Oslo * Check image id format before executing operations * fix bug:range() is not same in py3.x and py2.x * Fix the incorrect log message when creating images * Adding image location selection strategies * Fix inconsistent doc string and code of db_sync * fixing typo in rst file * Fix tmp DB path calculation for test_migrations.py * Change assertTrue(isinstance()) by optimal assert * add log for _get_images method * Makes 'expires_at' not appear if not set on task * Remove vim header * Update the glance-api manpage * Remove 'openstack/common/context.py' * Allow users to customize max header size * Decouple the config dependence on glance domain * Fix typo in doc string * Prevent min_disk and min_ram from being negative * Set image size to None after removing all locations * Update README to the valid Oslo-incubator doc * Cleans up imports in models.py * Sync Log levels from OSLO * Align glance-api.conf rbd option defaults with config * Bump hacking to 0.8 and get python 3.x compatibility * Add config option to limit image locations * replace type calls with isinstance * Adding logs to tasks * Skip unconfigurable drivers for store initialization * Fix typo 
in gridfs store * Oslo sync to recover from db2 server disconnects * fix comments and docstrings misspelled words * Fix call to store.safe_delete_from_backend * Switch to Hacking 0.8.x * assertEquals is deprecated, use assertEqual (H234) * Consider @,! in properties protection rule as a configuration error * Remove unused imports in glance * Remove return stmt of add,save and remove method * Migrate json to glance.openstack.common.jsonutils * Use common Oslo database session * Define sheepdog_port as an integer value * Sync with oslo-incubator (git 6827012) * Enable gating on F811 (duplicate function definition) * Set image size after updating/adding locations * Disallow negative image sizes * Fix and enable gating on H306 * Make code base E125 and E126 compliant * Fix 031 migration failed on DB2 * Remove the redundant code * Correct URL in v1 test_get_images_unauthorized * Refactor tests.unit.utils:FakeDB.reset * Fixed wrong string format in glance.api.v2.image_data * Empty files shouldn't contain copyright nor license * Use uuid instead of uuidutils * Enable H233/H301/H302 tests that are ignored at the moment * Remove duplicate method implementations in ImageLocationsProxy * Make Glance code base H102 compliant * Make Glance code base H201 compliant * Cleanup: remove unused code from store_utils * Filter out deleted images from storage usage * Add db2 communication error code when check the db connection * Refine output of glance service managment * Adds guard against upload contention * Fixes HTTP 500 when updating image with locations for V2 * Increase test coverage for glance.common.wsgi * Return 204 when image data does not exist * V2: disallow image format update for active status * Enable tasks REST API for async worker * Cleanly fail when location URI is malformed * Rename duplicate test_add_copy_from_upload_image_unauthorized * Adding missing copy_from policy from policy.json * Fix simple-db image filtering on extra properties * Pin sphinx to <1.2 * 
assertEquals is deprecated, use assertEqual instead * Fix and enable gating on H702 * Replace startswith by more precise store matching * Remove unused exceptions * Remove duplicate method __getitem__ in quota/__init__.py * Enforce copy_from policy during image-update * Refactor StorageQuotaFull test cases in test_quota * remove hardcode of usage * Added error logging for http store * Forbidden update message diffs images/tasks/member * Unittests added for glance/cmd/cache_manage.py * Makes tasks owner not nullable in models.py * Move is_image_sharable to registry api * Remove TestRegistryDB dependency on TestRegistryAPI * Introduce Task Info Table 2014.1.b1 --------- * Migrate to oslo.messaging * Add config option to limit image members * Add config option to limit image tags * Glance image-list failed when image number exceed DEFAULT_PAGE_SIZE * DB migration changes to support DB2 as sqlalchemy backend * Add documentation for some API parameters * RBD add() now returns correct size if given zero * Set upload_image policy to control data upload * Replace deprecated method assertEquals * Clean up duplicate code in v2.image_data.py * Fix docstring on detail in glance/api/v1/images.py * Use assertEqual instead of assertEquals in unit tests * Remove unused package in requirement.txt * Enable F40X checking * Verify for duplicate location+metadata instances * Adds domain level support for tasks * Add eclipse project files to .gitignore * Added unit tests for api/middleware/cache_manage.py * Fixed quotes in _assert_tables() method * Use common db model class from Oslo * Add upload policy for glance v2 api * Adding an image status transition diagram for dev doc * Add config option to limit image properties * Explicit listing of Glance policies in json file * Imported Translations from Transifex * Sync openstack.common.local from oslo * Clean up numeric expressions with oslo constants * Don't use deprecated module commands * Add tests for glance/notifier/notify_kombu * 
Fixes image delete and upload contention * Log unhandled exceptions * Add tests for glance/image_cache/client.py * Remove lxml requirement * Sync common db and db.sqlalchemy code from Oslo * Update glance/opensatck/common from oslo Part 3 * Tests added for glance/cmd/cache_cleaner.py * glance-manage should work like nova-manage * Adds tasks to db api * Sync lockutils from oslo * sync log from oslo * Add policy style '@'/'!' rules to prop protections * Enable H501: do not use locals() for formatting * Remove use of locals() when creating messages * Remove "image_cache_invalid_entry_grace_period" option * Add unit test cases for get func of db member repo * assertEquals is deprecated, use assertEqual * Document default log location in config files * Remove unused method setup_logging * Start using PyFlakes and Hacking * Sync units module from olso * Fixes error message encoding issue when using qpid * Use mock in test_policy * Use packaged version of ordereddict * Imported Translations from Transifex * Glance v2: Include image/member id in 404 Response * Replace qpid_host with qpid_hostname * Fix Pep8 1.4.6 warnings * Fixes content-type checking for image uploading in API v1 and v2 * Update my mailmap * Addition of third example for Property Protections * Sync iso8601 requirement and fixes test case failures * Fixes wrong Qpid protocol configuration * Use HTTP storage to test copy file functionality * Remove redundant dependencies in test-requirements * Documentation for using policies for protected properties * checking length of argument list in "glance-cache-image" command * optimize queries for image-list * Using policies for protected properties * Cleanup and make HACKING.rst DRYer * Enable tasks data model and table for async worker * Updated from global requirements * Add call to get specific image member * Put formatting operation outside localisation call * Remove unused import * The V2 Api should delete a non existent image * Avoid printing URIs which can 
contain credentials * Remove whitespace from cfg options * Use Unix style LF instead of DOS style CRLF * Adding 'download_image' policy enforcement to image cache middleware * Glance manage should parse glance-api.conf * Fixes rbd _delete_image snapshot with missing image * Correct documentation related to protected properties * Update functional tests for swift changes * Removed unsued import, HTTPError in v1/images.py * Allow tests to run with both provenances of mox * Glance GET /v2/images fails with 500 due to erroneous policy check * Do not allow the same member to be added twice 2013.2.rc1 ---------- * V2 RpcApi should register when db pool is enabled * Imported Translations from Transifex * Open Icehouse development * Convert Windows to Unix style line endings * Add documentation for property protections * Adding checking to prevent conflict image size * Fixes V2 member-create allows adding an empty tenantId as member * Fixing glance-api hangs in the qpid notifier * Change response code for successful delete image member to 204 * Cache cleaner wrongly deletes cache for non invalid images * Require oslo.config 1.2.0 final * Use built-in print() instead of print statement * Swift store add should not use wildcard raise * Corrected v2 image sharing documentation * Add swift_store_ssl_compression param * Log a message when image object not found in swift * Ensure prop protections are read/enforced in order * Funtional Tests should call glance.db.get_api * Enclose command args in with_venv.sh * Fix typo in config string * Adding encryption support for image multiple locations * Fixes typos of v1 meta data in glanceapi.rst * Respond with 410 after upload if image was deleted * Fix misused assertTrue in unit tests * Convert location meta data from pickle to string * Disallow access/modify members of deleted image * Fix typo in protected property message * Remove the unused mapper of image member create * Changed header from LLC to Foundation based on trademark 
policies * Implement protected properties for API v1 * Add rbd store support for zero size image * Remove start index 0 in range() * Convert non-English exception message when a store loading error * add missing index for 'owner' column on images table * Publish recent api changes as v2.2 * Update schema descriptions to indicate readonly * Enable protected properties in gateway * Property Protection Layer * Rule parser for property protections * Scrubber refactoring * Fix typo in IMAGE_META_HEADERS * Fix localisation string usage * Notify error not called on upload errors in V2 * Fixes files with wrong bitmode * Remove unused local vars * Clean up data when store receiving image occurs error * Show traceback info if a functional test fails * Add a storage quota * Avoid redefinition of test * Fix useless assertTrue * emit warning while running flake8 without virtual env * Fix up trivial License mismatches * Introduced DB pooling for non blocking DB calls * Use latest Oslo's version * Improve the error msg of v2 image_data.py * Fix Sphinx warning * Remove unused import * test failure induced by reading system config file * Prefetcher should perform data integrity check * Make size/checksum immutable for active images * Remove unused var DEFAULT_MAX_CACHE_SIZE * Implement image query by tag * Remove unused import of oslo.config * Code dedup in glance/tests/unit/v1/test_registry_api.py * Add unit test for migration 012 * Call _post_downgrade_### after downgrade migration is run * Use _pre_upgrade_### instead of _prerun_### * Perform database migration snake walk test correctly * redundant conditions in paginate-query * Refactor glance/tests/unit/v2/test_registry_client.py * Refactor glance/tests/unit/v1/test_registry_client.py * Improve test/utils.py * Make sure owner column doesn't get dropped during downgrade * image-delete fires multiple queries to delete its child entries * glance-replicator: enable logging exceptions into log file * Make disk and container formats 
configurable * Add space in etc/glance-cache.conf * Removes duplicate options registration in registry clients * remove flake8 option in run_tests.sh * Allow tests to run without installation * Remove glance CLI man page * Fix some logic in get_caching_iter * Adding metadata checking to image location proxy layer * Update .mailmap * Migrate to PBR for setup and version code * Interpolate strings after calling _() * BaseException.message is deprecated since Python 2.6 * Raise jsonschema requirement * Text formatting changes * Using unicode() convert non-English exception message * ambiguous column 'checksum' error when querying image-list(v2) * Handle None value properties in glance-replicator * Fixes Opt types in glance/notifier/notify_kombu.py * Add unit test for migration 010 * Sync models with migrations * Rename requirements files to standard names * Include pipeline option for using identity headers * Adding arguments pre-check for glance-replicator * Add v1 API x-image-meta- header whitelist * Stub out dependency on subprocess in unit tests * Allow insecure=True to be set in swiftclient * Verify if the RPC result is an instance of dict * Adds help messages to mongodb_store_db and mongodb_store_uri * Remove support for sqlalchemy-migrate < 0.7 * Don't rely on prog.Name for paste app * Simulate image_locations table in simple/api.py * Turn off debug logging in sqlalchemy by default * Glance api to pass identity headers to registry v1 * add doc/source/api in gitignore * Use cross-platform 'ps' for test_multiprocessing * Fix stubs setup and exception message formatting * Handle client disconnect during image upload * improving error handling in chunked upload 2013.2.b2 --------- * Adding Cinder backend storage driver to Glance * File system store can send metadata back with the location * index checksum image property * removed unused variable 'registry_port' * DB Driver for the Registry Service * Unit tests for scrubber * Remove references to clean arg from 
cache-manage * Deleting image that is uploading leaves data * Adding a policy layer for locations APIs * Add/remove/replace locations from an image * Adding multiple locations support to image downloading * Make db properties functions consistent with the DB API * Adds missing error msg for HTTPNotFound exception * Allow storage drivers to add metadata to locations * Fixes image-download error of v2 * On deleting an image, its image_tags are not deleted * Sync gettextutils from oslo * Adding store location proxy to domain * Notify does not occur on all image upload fails * Add location specific information to image locations db * Add custom RPC(Des|S)erializer to common/rpc.py * use tenant:* as swift r/w acl * Add image id to the logging message for upload * Fix cache delete-all-queued-images for xattr * Fix stale process after unit tests complete * Sync install_venv_common from oslo * Fix list formatting in docs * Fix doc formatting issue * Ignore files created by Sphinx build * Use oslo.sphinx and remove local copy of doc theme * Refactor unsupported default store testing * Add Sheepdog store * Fix 'glance-cache-manage -h' default interpolation * Fix 'glance-cache-manage list-cached' for xattr * Dont raise NotFound in simple db image_tag_get_all * Use python module loading to run glance-manage * Removed unusued variables to clean the code * Fixes exposing trace during calling image create API * Pin kombu and anyjson versions * Do not raise NEW exceptions * Port slow, overly assertive v1 functional tests to integration tests * Add a bit of description * Updated documentation to include notifications introduced in Grizzly * Make eventlet hub choice configurable * Don't run store tests without a store! 
* Import sql_connection option before using it * Fix for unencrypted uris in scrubber queue files * Fix incorrect assertion in test_create_pool * Do not send traceback to clients by default * Use Python 3.x compatible octal literals * Remove explicit distribute depend * Add missing Keystone settings to scrubber conf * Sql query optimization for image detail * Prevent '500' error when admin uses private marker * Replace openstack-common with oslo in HACKING.rst * Patch changes Fedora 16 to 18 on install page * Pass configure_via_auth down to auth plugin * Move sql_connection option into sqlalchemy package * Remove unused dictionary from test_registry_api.py * Remove routes collection mappings * updated content_type in the exception where it is missing * python3: Introduce py33 to tox.ini * Don't make functional tests inherit from IsolatedUnitTest * Add a policy layer for membership APIs * Prevent E500 when listing with null values * Encode headers and params * Fix pydevd module import error * Add documentation on reserving a Glance image * Import strutils from oslo, and convert to it * Sync oslo imports to the latest version 2013.2.b1 --------- * Fix undefined variable in cache * Make passing user token to registry configurable * Respond with 412 after upload if image was deleted * Add unittests for image upload functionality in v1 * Remove glance-control from the test suite * Prevent '500' error when using forbidden marker * Improve unit tests for glance.common package * Improve unit tests for glance.api.v1 module * rbd: remove extra str() conversions and test with unicode * rbd: return image size when asked * Add qpid-python to test-requires * tests: remove unused methods from test_s3 and test_swift * Implement Registry's Client V2 * RBD store uses common utils for reading file chunks * Redirects requests from /v# to /v#/ with correct Location header * Add documentation for query parameters * Small change to 'is_public' documentation * Fix test_mismatched_X test 
data deletion check * Add GLANCE_LOCALEDIR env variable * Remove gettext.install() from glance/__init__.py * Implement registry API v2 * Add RBD support with the location option * Use flake8/hacking instead of pep8 * Use RBAC policy to determine if context is admin * Create package for registry's client * Compress response's content according to client's accepted encoding * Call os.kill for each child instead of the process group * Improve unit tests for glance.common.auth module * Convert scripts to entry points * Fix functional test 'test_copy_from_swift' * Remove unused configure_db function * Don't raise HTTPForbidden on a multitenant environment * Expand HACKING with commit message guidelines * Redirects requests from /v# to /v#/ * Functional tests use a clean cached db that is only created once * Fixes for mis-use of various exceptions * scrubber: dont print URI of image to be deleted * Eliminate the race when selecting a port for tests * Raise 404 while deleting a deleted image * Fix test redefinitions * Sync with oslo-incubator copy of setup.py and version.py * Gracefully handle qpid errors * Fix Qpid test cases * Imported Translations from Transifex * Fix the deletion of a pending_delete image * Imported Translations from Transifex * Imported Translations from Transifex * Fix functional test 'test_scrubber_with_metadata_enc' * Make "private" functions that shouldn't be exported * Call monkey_patch before other modules are loaded * Adding help text to the options that did not have it * Improve unit tests for glance.api.middleware.cache module * Add placeholder migrations to allow backports * Add GridFS store * glance-manage should not require glance-registry.conf * Verify SSL certificates at boot time * Invalid reference to self in functional test test_scrubber.py * Make is_public an argument rather than a filter * remove deprecated assert_unicode sqlalchemy attribute * Functional tests display the logs of the services they started * Add 
'set_image_location' policy option * Add a policy handler to control copy-from functionality * Fallback to inferring image_members unique constraint name * Standardize on newer except syntax * Directly verifying that time and socket are monkey patched * Reformat openstack-common.conf * Fix domain database initialization * Add tests for image visibility filter in db * Add image_size_cap documentation * Return 413 when image_size_cap exceeded * Small change to exception handling in swift store * Remove internal store references from migration 017 * Check if creds are present and not None 2013.1.rc1 ---------- * Delete swift segments when image_size_cap exceeded * bump version to 2013.2 * Don't print sql password in debug messages * fixes use the fact that empty sequences are false * Handle Swift 404 in scrubber * Remove internal store references from migration 015 * Pin SQLAlchemy to 0.7.x * Add unit tests for glance.api.cached_images module * Document the os options config for swift store * Segmented images not deleted cleanly from swift * Do not return location in headers * Fix uniqueness constraint on image_members table * Declare index on ImageMember model * Log when image_size_cap has been exceeded * Publish API version 2.1 * Fix scrubber and other utils to use log.setup() * Switch to final 1.1.0 oslo.config release * Mark password options secret * Fix circular import in glance/db/sqlalchemy * Fix up publicize_image unit test * Fix rabbit_max_retry * Fix visibility on db image_member_find * Fix calls to image_member_find in tests * Characterize image_member_find * Retain migration 12 indexes for table image_properties with sqlite * Ensure that migration 6 retains deleted image property index * Fix check_003 method * Ensure disk_ and container_format during upload * Honor metadata_encryption_key in glance domain * Fix v2 data upload to swift * Switch to oslo.config * Update acls in the domain model * Refactor leaky abstractions * Remove unused variable 
'image_member_factory' * Generate notification for cached v2 download * A test for concurrency when glance uses sleep * Update documentation to reflect API v2 image sharing * v1 api image-list does not return shared images * Cannot change locations on immutable images * Update db layer to expose multiple image locations * Test date with UTC instead of local timezone * Added better schemas for image members, revised tests * Add pre and check phases to test migration 006 * Fix response code for successful image upload * Remove unused imports * Add pre and check phases to test migration 005 * Add pre and check phases to test migration 004 * Add PostgreSQL support to test migrations * Enable support for MySQL with test migrations * Set status to 'active' after image is uploaded * Removed controversial common image property 'os_libosinfo_shortid' * Parse JSON Schema Draft 10 in v2 Image update * Redact location from notifications * Fix broken JSON schemas in v2 tests * Add migration 021 set_engine_mysql_innodb * Refactor data migration tests * Fix migration 016 for sqlite * Pin jsonschema version below 1.0.0 * Add check for image_locations table * Avoid using logging in signal handlers * monkey_patch the time module for eventlet * Remove compat cfg wrapper * Remove unnecessary logging from migration 019 * Fix migration 015 downgrade with sqlite * Document db_auto_create in default config files * Update openstack.common * Extend the domain model to v2 image data 2013.1.g3 --------- * Add migration 20 - drop images.location * Add migration 19 - move image location data * Filter images by status and add visibility shared * Update oslo-config version * Sync latest install_venv_common.py * Adding new common image properties * Use oslo-config-2013.1b3 * Add migration 18 - create the image_locations table * Create connection for each qpid notification * Add migration to quote encrypted image location urls * Updates OpenStack LLC with OpenStack Foundation * Allowing member to 
set status of image membership * Add an update option to run_tests.sh * Use install_venv_common.py from oslo * Add status column to image_members * Adding image members in glance v2 api * Fix issues with migration 012 * Add migration.py based on the one in nova * Updated_at not being passed to db in image create * Fix moker typo in test_notifier * Clean dangling image fragments in filesystem store * Sample config and doc for the show_image_direct_url option * Avoid dangling partial image on size/checksum mismatch * Fix version issue during nosetests run * Adding database layer for image members domain model * Image Member Domain Model * Additional image member information * Adding finer notifications * Add LazyPluggable utility from nova * Update .coveragerc * Removed unnecessary code * Use more-specific value for X-Object-Manifest header * Allow description fields to be translated in schema * Mark password config options with secret * Update HACKING.rst per recent changes * Encrypt scrubber marker files * Quote action strings before passing to registry * Fixes 'not in' operator usage * Add to multi-tenant swift store documentation * Replace nose plugin with testtools details * Convert some prints to addDetails calls * Rearrange db tests in prep for testr * Stop using detailed-errors plugin for nose * Add _FATAL_EXCEPTION_FORMAT_ERRORS global * Fix kwargs in xattr BadDriverConfiguration exc * Prints list-cached dates in isoformat * Fail sensibly if swiftclient absent in test * Initialize CONF properly in store func tests * Ensure swift_store_admin_tenants ACLs are set * Remove Swift location/password from messages * Removed unnecessary code * Removed unncessary code * Pull in tarball version fix from oslo * Updated image loop to not use an enumerator * Log exception details * Update version code from oslo * Revert "Avoid testtools 0.9.25" * Avoid testtools 0.9.25 * Update glance config files with log defaults * Sync latest cfg and log from oslo-incubator * Make v2 
image tags test not load system policy * Replace custom tearDown with fixtures and cleanup * Update version code from oslo * Use testtools for unittest base class * Stub out find_file... fix policy.json test issue * Remove unused declaration in images.py * Add import for filesystem_store_datadir config * Update v1/images DELETE so it returns empty body * Relax version constraint on Webob-1.0.8 * Set content-length despite webob * Update common openstack code from oslo-incubator * Modify the v2 image tags to use domain model grizzly-2 --------- * Fix broken link in docs to controllingservers * Adding a means for a glance worker to connect back to a pydevd debugger * Use imported exception for update_store_acls * Fix import order nits * Verify size in addition to checksum of uploaded image * Use one wsgi app, one dbengine worker * Set Content-MD5 after calling webob.Response._app_iter__set * Modify the v2 image controller to use domain model * Log error on failure to load paste deploy app * Configure endpoint_type and service_type for swift * Refactor multi-tenant swift store * Add registry_client_timeout parameter * Use io.BufferedIOBase.read() instead of io.BytesIO.getvalue() * Port to argparse based cfg * wsgi.Middleware forward-compatibility with webob 1.2b1 or later * Allow running testsuite as root user * Allow newer boto library versions * Fixed image not getting deleted from cache * Updates keystone middleware classname in docs * v2 API image upload set image status to active * Use auth_token middleware from python-keystoneclient * Add domain proxies that stop unauthorized actions * Add domain proxies that do policy.enforce checks * Use 'notifications' as default notification queue name * Unused variables removed * Fixed deleted image being downloadable by admin * Rewrite S3 functional tests * Add store test coverage for the get_size method * Implement get_size filesystem store method * Add an image repo proxy that handles notifications * Fixed Typo * Return 
size as int from store get call * Wrap log messages with _() * Add pep8 ignore options to run_tests.sh * Fix typo uudiutils -> uuidutils * Make cooperative reader always support read() * Add an image proxy to handle stored image data grizzly-1 --------- * Allow for not running pep8 * Refactor where store drivers are initialized * Audit error logging * Stop logging all registry client exceptions * Remove unused imports * Add note about urlencoding the sql_connection config opt * Add an image repo to encapsulate db api access * Add an image domain model and related helpers * Fix simple db image_get to look like sqlalchemy * Return 403 on images you can see but can't modify * Fixes is_image_visible to not use deleted key * Ensure strings passed to librbd are not unicode * Use generate_uuid from openstack common * Update uuidutils from openstack common * Code cleanup: remove ImageAddResult class * Lowering certain log lines from error to info * Prevent infinite respawn of child processes * Make run_tests.sh run pep8 checks on bin * Make tox.ini run pep8 checks on bin * Pep8 fixes to bin/glance* scripts * Ensure authorization before deleting from store * Port uuidutils to Glance * Delete from store after registry delete * Unit test remaining glance-replicator methods * Use openstack common timeutils in simple db api * Unit test replication_dump * pin sqlalchemy to the 0.7 series * DRY up image fetch code in v2 API * Return 403 when admin deletes a deleted image * Pull in a versioning fix from openstack-common * Fixes deletion of invalid image member * Return HTTP 404 for deleted images in v2 * Update common to 18 October 2012 * implements selecting version in db sync * add command "status" to "glance-control" * Disallow admin updating deleted images in v2 api * Clean up is_public filtering in image_get_all * SSL functional tests always omitted * Fix scrubber not scrubbing with swift backend * Add OpenStack trove classifier for PyPI * Disallow updating deleted images * 
Unit test replication_size * Add noseopts and replace noseargs where needed to run_test.sh * Setup the pep8 config to check bin/glance-control * Change useexisting to extend_existing to fix deprecation warnings * Fix fragile respawn storm test * Fix glance filesystem store race condition * Add support for multiple db test classes * Don't parse commandline in filesystem tests * Improve test coverage for replicator's REST client * Correct conversion of properties in headers * Add test for v2 image visibility * change the default sql connection timeout to 60s * Add test for v1 image visibility * FakeAuth not always admin * Add GLANCE_TEST_TMP_DIR environment var for tests * Call setup_s3 before checking for disabled state * Add insecure option to registry https client * Clean up pep8 E128 violations * Rename non-public method in sqlalchemy db driver * Add image_member_update to simple db api * Multiprocess respawn functional test fix * Remove unnecessary set_acl calls * Clean up pep8 E127 violations * Remove notifications on error * Change type of rabbit_durable_queues to boolean * Pass empty args to test config parser * Document api deployment configuration * Clean up pep8 E125 violations * Clean up pep8 E124 violations * Ensure workers set to 0 for all functional tests * image_member_* db functions return dicts * Alter image_member_[update|delete] to use member id * Add test for db api method image_member_create * Add test for image_tag_set_all * Add rabbit_durable_queues config option * Remove extraneous db method image_property_update * Update docs with modified workers default value * Replace README with links to better docs * Remove unused animation module * Drop Glance Client * Enable multi-processing by default * Ensure glance-api application is "greened" * Clean up pep8 E122, E123 violations * Clean up pep8 E121 violations * Fix scrubber start & not scrubbing when not daemon * Clean up pep8 E502, E711 violations * Expand cache middleware unit tests * Change 
qpid_heartbeat default * Don't WARN if trying to add a scheme which exists * Add unit tests for size_checked_iter * Add functional tests for the HTTP store * Generalize remote image functional test * Add filesystem store driver to new func testing * Add region configuration for swift * Update openstack-common log and setup code * Update v2.0 API version to CURRENT * Set new version to open Grizzly development * Add s3_store_bucket_url_format config option * Ensure status of 'queued' image updated on delete * Fallback to a temp pid file in glance-control * Separate glance cache client from main client * Rewrite Swift store functional tests * Raise bad request early if image metadata is invalid * Return actual unicode instead of escape sequences in v2 * Handle multi-process SIGHUP correctly * Remove extraneous whitespace in config files * Remove db auto-creation magic from glance-manage * Makes deployed APIs configurable * Asynchronously copy from external image source * Sort UUID lists in test_image_get_all_owned * Call do_start correctly in glance-control reload * Sync some misc changes from openstack-common * Sync latest cfg changes from openstack-common * Exception Handling for image upload in v2 * Fix cache not handling backend failures * Instantiate wsgi app for each worker * Require 'status' in simple db image_create * Drop glance client + keystone config docs * Use PATCH instead of PUT for v2 image modification * Delete image from backend store on delete * Document how to deploy cachemanage middleware * Clean up comments in paste files * WARN and use defaults when no policy file is found * Encode headers in v1 API to utf-8 * Fix LP bug #1044462 cfg items need secret=True * Always call stop_servers() after having started them in tests * Adds registry logging * Filter out deleted image properties in v2 api * Limit simple db image_create to known image attrs * Raise Duplicate on image_create with duplicate id * Expand image_create db test * Add test for 
nonexistent image in db layer * Catch pruner exception when no images are cached * Remove bad error message in glance-cache-manage * Add missing columns to migration 14 * Adds notifications for images v2 * Move authtoken config out of paste * Add kernel/ramdisk_id, instance_uuid to v2 schema * Tweak doc page titles * Drop architecture doc page * Add link to notifications docs on index * Remove repeated image-sharing docs * Tidy up API docs * Log level for BaseContextMiddleware should be warn * Raise Forbidden exception in image_get * Activation notification for glance v1 api * Add glance/versioninfo to MANIFEST.in * HTTPBadRequest in v2 on malformed JSON request body * PEP8 fix in conf.py * Typo fix in glance: existant => existent * Rename glance api docs to something more concise * Drop deprecated client docs * Clean up policies docs page * Remove autodoc and useless index docs * Add nosehtmloutput as a test dependency * Remove partial image data when filesystem is full * Add 'bytes' to image size rejection message * Add policy check for downloading image * Convert limiting_iter to LimitingReader * Add back necessary import * Adds glance registry req id to glance api logging * Make max image size upload configurable * Correctly re-raise exception on bad v1 checksum * Return httplib.HTTPResponse from fake reg conn * Add DB Management docs * Fix auth cred opts for glance-cache-manage * Remove unused imports * Set proper auth middleware option for anon. 
access * multi_tenant: Fix 'context' is not defined error * Validate uuid-ness in v2 image entity * v2 Images API returns 201 on image data upload * Fixes issue with non string header values in glance client * Fix build_sphinx setup.py command * Updates Image attribute updated_at * Add policy enforcement for v2 api * Raise 400 error on POST/PUTs missing request bodies folsom-3 -------- * Mark bin/glance as deprecated * Return 201 on v2 image create * Ignore duplicate tags in v2 API * Expose 'protected' image attribute in v2 API * Move to tag-based versioning * Update restrictions on allowed v2 image properties * Reveal v2 API as v2.0 in versions response * Add min_ram and min_disk to v2 images schema * Filter out None values from v2 API image entity * Refactor v2 images resource unit tests * Use container_format and disk_format as-is in v2 * Make swift_store_admin_tenants a ListOpt * Update rbd store to allow copy-on-write clones * Call stop_servers() in direct_url func tests * Drop unfinished parts of v2 API * Fix a couple i18n issues in glance/common/auth.py * Sync with latest version of openstack.common.notifier * Sync with latest version of openstack.common.log * Sync with latest version of openstack.common.timeutils * Sync with latest version of openstack.common.importutils * Sync with latest version of openstack.common.cfg * Allows exposing image location based on config * Do not cache images that fail checksum verification * Omit deleted properties on image-list by property * Allow server-side validation of client ssl certs * Handle images which exist but can't be seen * Adds proper response checking to HTTP Store * Use function registration for policy checks * fix the qpid_heartbeat option so that it's effective * Add links to image access schema * ^c shouldn't leave incomplete images in cache * uuid is a silly name for a var * Support master and slave having different tokens * Add a missing header strip opportunity * URLs to glance need to be absolute * Use 
with for file IO * Add swift_store_admin_tenants option * Update v1/v2 images APIs to set store ACLs * Use event.listen() instead of deprecated listeners kwarg * Store context in local thread store for logging * Process umask shouldn't allow world-readable files * Make TCP_KEEPIDLE configurable * Reject rather than ignore forbidden updates * Raise HTTPBadRequest when schema validation fails * Expose 'status' on v2 image entities * Simplify image and access_record responses * Move optional dependencies from pip-requires to test-requires * Fix dead link to image access collection schema * Add in missing image collection schema link * Drop static API v2 responses * Include dates in detailed image output * Update image caching middleware for v2 URIs * Ensure Content-Type is JSON-like where necessary * Have non-empty image properties in image.delete payload * Add Content-MD5 header to V2 API image download * Adds set_acls function for swift store * Store swift images in separate containers * Include chunk_name in swift debug message * Set deleted_at field when image members and properties are deleted * Use size_checked_iter in v2 API * Honor '--insecure' commandline flag also for keystone authentication * Make functional tests listen on 127.0.0.1 * Adds multi tenant support for swift backend * Provide stores access to the request context * Increase wait time for test_unsupported_default_store * Match path_info in image cache middleware * Dont show stack trace on command line for service error * Replace example.com with localhost for some tests * Fix registry error message and exception contents * Move checked_iter from v1 API glance.api.common * Support zero-size image creation via the v1 API * Prevent client from overriding important headers * Updates run_tests.sh to exclude openstack-common * Use openstack.common.log to log request id * Update 'logging' imports to openstack-common * Make get_endpoint a generic reusable function * Adds service_catalog to the context * 
Add openstack-common's local and notifier modules * Making docs pretty! * Removing 'Indices and tables' heading from docs * Remove microseconds before time format conversion * Add bin/glance-replicator to scripts in setup.py * Initial implementation of glance replication * Generate request id and return in header to client * Reorganize context module * Add openstack.common.log * Ignore openstack-common in pep8 check * Keystone dep is not actually needed * Report size of image file in v2 API * Expose owner on v2 image entities * Add function tests for image members * Allow admin's to modify image members * Allow admins to share images regardless of owner * Improve eventlet concurrency when uploading/downloading * Simplify v2 API functional tests folsom-2 -------- * Fix IndexError when adding/updating image members * Report image checksum in v2 API * Store properties dict as list in simple db driver * Use PyPI for swiftclient * Refactor pagination db functional tests * Combine same-time tests with main db test case * Add retry to server launch in respawn test * Reorder imports by full import path * Adds /v2/schemas/images * Implement image filtering in v2 * Include all tests in generated tarballs * Allow CONF.notifier_strategy to be a full path * Add image access records schema for image resources * Remove image members joinedload * Clean up image member db api methods * Retry test server launch on failure to listen * Make image.upload notification send up2date metadata * Added schema links logic to image resources * Simplify sqlalchemy imports in driver * Reduce 'global' usage in sqlalchemy db driver * Standardize logger instantiation * Add link descriptor objects to schemas * Fix exception if glance fails to load schema * Move the particulars of v2 schemas under v2 * Remove listing of image tags * Set up Simple DB driver tests * Trace glance service on launch failure * Revert "Funnel debug logging through nose properly." 
* Capture logs of failing services in assertion msg * Remove some more glance-cache PasteDeploy remnants * Fix typo of conf variable in config.py * Remove unused imports in db migrations * Increase timeout to avoid spurious test failures * adds missing import and removes empty docstring * Convert db testing to use inheritance * Clean up .pyc files before running tests * make roles case-insensitive * Funnel debug logging through nose properly * Fix typo of swift_client/swiftclient in store_utils * Stop revealing sensitive store info * Avoid thread creation prior to service launch * Don't use PasteDeploy for scrubber and cache daemons * Remove some unused glance-cache-queue-image code * Implement pagination and sorting in v2 * Turn off SQL query logging at log level INFO * Default db_auto_create to False * Use zipballs instead of git urls * Add metadata_encryption_key to glance-cache.conf * Fix help messages for --debug * Use python-swiftclient for swift store * Fix to not use deprecated response.environ any more * Import db driver through configuration * Move RequestContext.is_image_* methods to db layer * Begin replacement of sqlalchemy driver imports * webob exception incorrectly used in v1 images.py * Add tests and simplify GlanceExceptions * Update default values for known_stores config * Remove the conf passing PasteDeploy factories * Port remaining code to global conf object * Made changes to adhere to HACKING.rst specifications * Use openstack-common's policy module * Re-add migrate.cfg to tarball * Implements cleaner fake_request * Create 'simple' db driver * Glance should use openstack.common.timeutils * Clean up a few ugly bits from the testing patch * Fix typo in doc * Add cfg's new global CONF object * fix side effects from seekability test on input file * Just use pure nosetests * Fix coverage jobs. 
Also, clean up the tox.ini * Move glance.registry.db to glance.db * Glance should use openstack.common.importutils * Add read-only enforcement to v2 API * Add a base class for tests * Expose tags on image entities in v2 API * Add additional info. to image.delete notification * Expose timestamps on image entities in v2 API * Sync with latest version of openstack.common.cfg * Enable anonymous access through context middleware * Add allow_additional_image_properties * Fix integration of image properties in v2 API * Lock pep8 at v1.1 * Lock pep8 to version 0.6.1 in tox.ini * Fail gracefully if paste config file is missing * Add missing files to tarball * Remove unused imports in setup.py * Adds sql_ config settings to glance-api.conf * Correct format of schema-image.json * Fix paste to correctly deploy v2 API * Add connection timeout to glance client * Leave behind sqlite DB for red functional tests * Support DB auto-create suppression * Fix glance-api process leak in respawn storm test * Stubout httplib to avoid actual http calls * Backslash continuation removal (Glance folsom-1) * Implement image visibility in v2 API * Add min_ram and min_disk to bin/glance help * Implements blueprint import-dynamic-stores * Add credential quoting to Swift's StoreLocation * Combine v2 functional image tests * Simplify JSON Schema validation in v2 API * Expose deployer-specific properties in v2 API * Test that v2 deserializers use custom schemas * Load schema properties when v2 API starts * Support custom properties in schemas for v2 API * Fix tiny format string nit in log message * Fixes bug 997565 * Allow chunked image upload in v2 API * wsgi: do not respawn on missing eventlet hub * Implement v2 API access resource * Disallow image uploads in v2 API when data exists * Implement v2 API image tags * Use ConfigOpts.find_file() for policy and paste * Implement image data upload/download for v2 API * Use sdist cmdclass from openstack-common * glance-api: separate exit status from 
message * Update noauth caching pipeline to use unauth-ctx * Return 204 from DELETE /v2/images/ * Add localization catalog and initial po files to Glance. Fix bug 706449 * Add /v2 to sample glance-api-paste.ini * Basic functionality of v2 /images resource * Split noauth context middleware into new class * Add -c|--coverage option to run_tests.sh * Convert glance to glance/openstack/common/setup.py * Update glance to pass properly tenant_name * Cleanup authtoken examples * Support for directory source of config files * Support conf from URL's with versions * Auto generate AUTHORS file for glance * Integrate openstack-common using update.py * Fixes LP #992096 - Ensure version in URL * Begin functional testing of v2 API * Fixes LP #978119 - cachemanagement w/o keystone * Omit Content-Length on chunked transfer * Fix content type for qpid notifier * Remove __init__.py from locale dir * Fix i18n in glance.notifier.notify_kombu * Override OS_AUTH_URL when running functional tests * remove superfluous 'pass' * fix bug lp:980892,update glance doc * Add a space to fix minor typo in glance help * Suppress pagination on non-tty glance index * Kill glance-api child workers on SIGINT * Ensure swift auth URL includes trailing slash * add postgresql support to test_migrations * 012_id_to_uuid: Also convert ramdisk + kernel ids * API v2 controller/serialization separation * search for logger in PATH * Set install_requires in setup.py * Minor grammar corrections * Bootstrapping v2 Image API implementation * Fix db migration 12 * Remove unused imports * Reorganize pipelines for multiple api versions * Skip test depending on sqlite3 if unavailable * Defaulted amazon disk & container formats * Compile BigInteger to INTEGER for sqlite * Updated RST docs on containers, fewer references to OVF format * rename the right index * Reject excessively long image names * Test coverage for update of image ownership * Add MySQLPingListener() back * Add support for auth version 2 * Run 
version_control after auto-creating the DB * Allow specifying the current version in 'glance-manage version_control' * Publish v2 in versions responses * Allow yes-like values to be interpreted as bool * Support owner parameter to glance add * Adding versioned namespaces in test dir * Typo * Ensure functional db connection in configure_db() * Set content_type for messages in Qpid notifier * Avoid leaking secrets into config logging * Fixes lp959670 * Send output of stty test cmd to stderr * Use unique per-test S3 bucket name * Specify location when creating s3 bucket * Open Folsom * Update 'bin/glance add' docstring *_format options * Ensure all unauthorized responses return 403 * Avoid leaking s3 credentials into logs * Avoid glance-logcapture displaying empty logs * Add 'publicize_image' policy * Fixed db conn recovery issue. Fixes bug 954971 * tox tests with run_tests.sh instead of nosetests * Don't use auth url to determine service protocol * Use tenant/user ids rather than names * Update context middleware with supported headers * Fixes LP #957401 - Remove stray output on stderr * check connection in Listener. refer to Bug #943031 * Avoid tests leaking empty tmp dirs * Remove keystone.middleware.glance_auth_token * Updating version of Keystone * Add policy checks for cache manage middleware * nose plugin to capture glance service logs * Add new UnexpectedStatus exception * Do not error when service does not have 'type' * Disambiguates HTTP 401 and HTTP 403 in Glance. 
Fixes bug 956513 * Add admin_role option * Remove references to admin_token * Remove glance-cache-queue-image * Remove dependency on apiv1app from cachemanage * Return 403 when policy engine denies action * Add error checking to get_terminal_size * Well-formed exception types for 413 & 503 * Ensure copy and original image IDs differ * Include babel.cfg and glance.pot in tarballs * Updating authentication docs * General cleanup * General docs cleanup * Remove todolist from docs * Add note about cache config options * Change CLIAuth arg names * Retry sendfile on EAGAIN or EBUSY * Add module name to ClientException * Update cli docs * Remove 'community' doc page * Removing registry spec from docs * Fixes LP#934492 - Allow Null Name * Refresh SSL cfg after parsing service catalog entry * Fix typo in tox.ini * Glance cache updates to support Keystone Essex * updates man page for glance-scrubber. this time with extra pep8 scrubbing powers. Fixes bug 908803 * Update tox.ini for jenkins * Replaced use of webob.Request.str_param * Update paste file to use service tenant * Update bin/glance to allow for specifying image id * Fix deprecated warnings * Remove trailing whitespaces in regular file * add git commit date / sha1 to sphinx html docs * Glance skip prompting if stdin isn't a tty * Allow region selection when using V2 keystone * Disallow file:// sources on location or copy-from * Progress bar causes intermittent test failures * Added first step of babel-based translations * Complete fix for modification of unowned image * Fix update of queued image with location set * Support copy-from for queued images * Add checksum to an external image during add * Align to jenkins tox patterns * Fix MANIFEST.in to include missing files * Fix exception name * Correct kernel/ramdisk example in docs * Create sorting/pagination helper function * Support new image copied from external storage * blueprint progressbar-upload-image * Avoid TestClient error on missing '__mro__' attr * 
disk/container_format required on image activate * Require container & disk formats on image create * Support non-UTC timestamps in changes-since filter * Return 503 if insufficient permission on filestore * Adds README.rst to the tarball * Ensure StorageFull only raised on space starvation * Require auth URL if keystone strategy is enabled * 003_add_disk_format.py: Avoid deadlock in upgrade * Function uses 'msg' not 'message' * Fix paging ties * Ensure sane chunk size when pysendfile unavailable * New -k/--insecure command line option * Add a generic tox build environment * Fix pep8 error * Update Authors file * Implement blueprint add-qpid-support * Include glance/tests/etc * Don't fail response if caching failed * Force auth_strategy=keystone if --auth_url or OS_AUTH_URL is set * Make Glance work with SQLAlchemy 0.7 * Use sendfile() for zero-copy of uploaded images * Respawn glance services on unexpected death * Blueprint cli-auth: common cli args * Prep tox config for jenkins builds * Get rid of DeprecationWarning during db migration * Add --capture-output option to glance-control * Add filter validation to glance API * Fixes LP 922723 * Typofix is_publi -> is_public * Add --await-child option to glance-control * Fix Bug #919255 * Cap boto version at 2.1.1 * Simplify pep8 output to one line per violation * Handle access restriction to public unowned image * Check service catalogue type rather than name * Restore inadvertantly dropped lines * Include the LICENSE file in the tarball * Change xattr usage to be more broadly compatible * Fix mixed usage of 's' and 'self' * Don't force client to supply SSL cert/key * Few small cleanups to align with Nova essex-3 ------- * Adds documentation for policy files * Client.add_image() accepts image data as iterable * More flexible specification of auth credentials * glance-api fails fast if default store unsupported * Bug #909574: Glance does not sanity-check given image size on upload * glance-control need not locate a 
server's config file (lp#919520) * Bug#911599 - Location field wiped on update * Return 400 if registry returns 400 * Set url's on AuthBadRequest exceptions * Add policy checking for basic image operations * Swallow exception on unsupported image deletion * Ensure we only send a single content-type header * Multi-process Glance API server support * Set size metadata correctly for remote images * Make paste.ini file location configurable * Avoid the need for users to manually edit PasteDeploy config in order to switch pipelines * Split out paste deployment config from the core glance *.conf files into corresponding *-paste.ini files * Fixes LP Bug#913608 - tests should be isolated * Set correct Content-Length on cached remote images * Implement retries in notify_kombu * Return correct href if bind_host is 0.0.0.0 * Remove assertDictEqual for python 2.6 compatibility * Add optional revision field to version number * LP Bug#912800 - Delete image remain in cache * Add notifications for sending an image * Bug #909533: Swift uploads through Glance using ridiculously small chunks * Add Fedora clauses to the installing document * Remove doc/Makefile * Fixes incorrect URI scheme for s3 backend * Add comments for swift options in glance-api.conf * Split notification strategies out into modules * fix bug 911681 * Fix help output for inverse of BoolOpt * PEP8 glance cleanup * Add more man pages * Set execute permissions on glance-cache-queue-image * Add a LICENSE file * Add ability to specify syslog facility * Install an actual good version of pip * Bug #909538: Swift upload via Glance logs the password it's using * Add tox.ini file * Synchronize notification queue setup between nova and glance * Fixes keystone auth test failures in python 2.6 * Removed bin/glance's TTY detection * Fixes request with a deleted image as marker * Adds support for protecting images from accidental deletion * Fix for bug 901609, when using v2 auth should use /v2.0/tokens path * Updated 
glance.registry.db for bug 904863 * Removing caching cruft from bin/glance * Fixes LP Bug#901534 - Lost properties in upload * Update glance caching middleware so doesn't try to process calls to subresources. Fixes LP bug #889209 * Ensure functional tests clean up their images * Remove extra swift delete_object call * Add missing files to tarball * Allow glance keystone unit tests to run with essex keystone * Convert glance to use the new cfg module * Add new cfg module * Lock keystone to specific commit in pip-requires * Add the missing column header to list-cached * Rename 'options' variables to 'conf' * Add generic PasteDeploy app and filter factories * Secondary iteration of fix for bug 891738 * Rename .glance-venv to .venv * Fix for bug 900258 -- add documentation for '--url' glance cli option * Add --url option to glance cli * Fixes LP Bug#850377 * Fixes LP Bug#861650 - Glance client deps * Added some examples for "glance add" * Bug#894027: use correct module when building docs * Adds option to set custom data buffer dir * Fix bug 891738 * Added missing depend on nosexcover * Removed some cruft * Fixes LP Bug#837817 - bin/glance cache disabled * Separating add vs general store configuration * Fixes LP Bug#885341 - Test failure in TestImageCacheManageXattr * Making prefetcher call create_stores * Fix handle get_from_backend returning a tuple * Casting foreign_keys to a list in order to index into it * Using Keystone's new port number 35357 * Adding admin_token to image-cache config * Removing assertGreaterEqual * Correcting image cleanup in cache drivers * Adding tests to check 'glance show ' format * Update 'glance show' to print a valid URI. 
Fixes bug #888370 * Gracefully handle image_cache_dir being undefined * Remove unused versions pipeline from PasteDeploy config * Allow glance-cache-* find their config files * Add some test cases for glance.common.config * Fix name error in cache middleware * Check to make sure the incomplete file exists before moving it during rollback. Fixes bug #888241 * Fix global name 'sleep' is not defined in wsgi.py. Fixes bug #888215 * Fixes LP Bug#878411 - No docs for image cache * Fix typo in the cached images controller essex-1 ------- * load gettext in __init__ to fix '_ is not defined' * Adds option to encrypt 'location' metadata * Fix LP Bug#885696 two issues with checked_iter * Fix Keystone API skew issue with Glance client * Fixed test failure in Python 2.6 * Glance redirect support for clients * Fixes LP Bug#882185 - Document Swift HTTPS default * Fixes LP Bug#884297 - Install docs should have git * Add "import errno" to a couple of files * Consolidate glance.utils into glance.common.utils * Correcting exception handling in glance-manage * More cache refactoring - Management Middleware * Fixes LP Bug#882585 - Backend storage disconnect * Convert image id value to a uuid * Remove 'location' from POST/PUT image responses * Removing glance-upload * Adds Driver Layer to Image Cache * Removed 'mox==0.5.0' and replaced with just 'mox' in tools/pip-requires * Removing duplicate mox install in pip-requires * Add .gitreview config file for gerrit * Making TCP_KEEPIDLE socket option optional * Overhauls the image cache to be truly optional * Fixing functional tests that require keystone * Fixes LP Bug#844618 - SQLAlchemy errors not logged * Additions to .gitignore * Better document using Glance with Keystone * Fixes LP Bug#872276 - small typo in error message * Adds SSL configuration params to the client * Increases test coverage for the common utils * Refactoring/cleanup around our exception handling * Port Authors test to git * Add RBD store backend * Fixes LP Bug#860862 
- Security creds still shown * Extract image members into new Glance API controller * Refactoring registry api controllers * Returning functionality of s3 backend to stream remote images * Make remote swift image streaming functional * Improving swift store uri construction * Fixes LP Bug #850685 * Do not allow min_ram or min_disk properties to be NULL and if they are None, make sure to default to 0. Fixes bug 857711 * Implementing changes-since param in api & registry * Documenting nova_to_os_env.sh tool * Added min_disk and min_ram properties to images Fixes LP Bug#849368 * Fixing bug 794582 - Now able to stream http(s) images * Fixes LP Bug#755916 - Location field shows creds * Fixes LP Bug #804429 * Fixes Bug #851216 * Fixes LP Bug #833285 * Fixes bug 851016 * Fix keystone paste config for functional tests * Updating image status docs * * Scrubber now uses registry client to communicate with registry * glance-api writes out to a scrubber "queue" dir on delete * Scrubber determines images to be deleted from "queue" dir not db * Fixes LP Bug#845788 * Open Essex * Remove PWD from possible config_file_dirs * Update paste config files with keystone examples. 
see ticket: lp839559 * Adding Keystone support for Glance client * Fix cached-images API endpoint * Bug fix lp:726864 * Fixes Bug: lp825024 * Add functional tests * Switch file based logging to WatchedFileHandler for logrotate * Fixes LP Bug #827660 - Swift driver fail 5G upload * Bug lp:829064 * Bug lp:829654 * Update rfc.sh to use 'true' * Addresses glance/+spec/i18n * Addresses glance/+spec/i18n * Add rfc.sh for git review * Add support for shared images * Add notifications for uploads, updates and deletes * Bug Fix lp:825493 * Bug fix lp:824706 * Adds syslog support * Fixes image cache enabled config * Improves logging by including traceback * Addresses glance/+spec/i18n * casting image_id to int in db api to prevent false matching in database lookups * Addresses Bug lp:781410 * Removes faked out datastore entirely, allowing the DB API to be unit tested * Consolidates the functional API test cases into /glance/tests/functional/test_api.py, adds a new Swift functional test case, verified that it works on Cloud Files with a test account * breaking up MAX_ITEM_LIMIT and making the new values configurable * Add @skip_if_disabled decorator to test.utils and integrate it into the base functional API test case. The S3 functional test case now uses test_api.TestApi as its base class and the setUp() method sets the disabled and disabled_message attributes that the @skip_if_disabled decorator uses * Adds swift_enable_snet config * Fixes bug lp:821296 * Detect python version in install_venv * Implemented @utils.skip_test, @utils.skip_unless and @utils.skip_if functionality in glance/test/utils.py. Added glance/tests/unit/test_skip_examples.py which contains example skip case usages * Changed setup.py to pull version info from git * Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. 
In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3 * Make sure we're passing the temporary file in a read-mode file descriptor to S3 * Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3 * - removed curl api functional tests - moved httplib2 api functional tests to tests/functional/test_api.py * merging trunk * Make tests a package under glance * removing curl tests and moving httplib2 tests * Move tests under the glance namespace * Add filter support to bin/glance index and details calls * merging trunk * Update registry db api to properly handle pagination through sorted results * Our code doesn't work with python-xattr 0.5.0, and that's the version installed in RH/Centos :( Andrey has updated the RPM config to specify 0.6.0, and this does the same to pip-requires * Replaced occurances of |str(e)| with |"%s" % e| * First round of refactoring on stores * Remove expected_size stuff * Make calling delete on a store that doesn't support it raise an exception, clean up stubout of HTTP store and testing of http store * adding sort_key/sort_dir to details * merging lp:~rackspace-titan/glance/registry-marker-lp819551 * adding sort_key/sort_dir params * adding --fixes * adding complex test cases to recreate bug; updating db api to respect marker * Add configuration check for Filesystem store on configure(), not every call to add() * Refactor S3 store to make configuration one-time at init versus every method call invocation * Refactor Swift store to make configuration one-time 
at init versus every method call invocation * Forgot to add a new file.. * Refactors stores to be stateful: * Make sure xattr>=0.6.0 in pip-requires * updating documentation * making limit option an integer * updating broken tests * adding limit/marker to bin/glance details call * adding limit/marker params to bin/glance index * merging trunk * Use of "%default" in help string does not work, have to use "%(default)s". Per the 4th example http://docs.python.org/dev/library/argparse.html#prog * Added nose-exclude to pip-requires * Installed nose-exclude, ./run_tests.sh --unittests-only add '--exclude-dir=tests/functional' to NOSEARGS * This one has been bugging me for a while, finally found out how to use the local default variable in the help string * adding --fixes to commit * Replaced occurances of |str(e)| with |"%s" % e| * Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact * Fix for boto1.9b issue 540 (http://code.google.com/p/boto/issues/detail?id=540) * Remove unnecessary hashlib entry in pip-requires * Add myself to Authors (again) * hashlib exists all of the way back to python 2.5, there's no need to install an additional copy * Adds image_cache_enabled config needed to enable/disable the image-cache in the glance-api * Add more unit tests for URI parsing and get_backend_class() (which is going away in refactor-stores branch, but oh well..) * Added unit tests for swift_auth_url @property. It was broken. startwith('swift+http') matches swift+https first * Don't tee into the cache if that image is already being written * Re-add else: raise * Final fixes merging Rick's swift_auth_url @property with previous URI parsing fixes that were in the S3 bug branch.. 
* merge trunk * This updates the pep8 version in pip-requires and updates run_tests.sh to provide a '-p' option that allows for just pep8 to be run * Adding back image_cache_enabled config option for glance-api * Don't tee same image into cache multiple times * Fixes two things: * adding run_tests.sh -p * PEP8 whitespace fix * Swift client library needs scheme * Add tests for bad schemes passed to get_backend_class() * Add tests for bad URI parsing and get_backend_class() * Include missing bin/glance-scrubber in tarball * Include bin/glance-scrubber in tarball binaries * One more auth_tok-related change, to make it easier for nova to use the client without violating any abstraction boundaries * Add fix for Bug #816386. Wait up to 5 min for the image to be deleted, but at least 15 seconds * remove superfluous if statement * Loop up to 5 min checking for when the scrubber deletes * Typo in error condition for create_bucket_on_put, make body seekable in req object, and remove +glance from docs and configs * Add functional test case for checking delete and get of non-existing image * New local filesystem image cache with REST management API * PEP8 Fixes * Using DELETE instead of POST reap_invalid, reap_stalled * Forgot to put back fix for the get_backend_class problem.. * Adding logging if unable to delete image cache file * Add test case for S3 s3_store_host variations and fixes for URL bug * Ensure image is active before trying to fetch it * Boy, I'm an idiot...put this in the wrong branch directory.. 
* Handling ZeroDivision Error * Using alternate logging syntax * Missing import of common.config in S3 driver * Tighten up file-mode handling for cache entry * Adding request context handling * Merging trunk * Fixed review stuff from Brian * Allow delaying the actual deletion of an image * have the scrubber init a real context instead of a dict * merge trunk * Adds authentication middleware support in glance (integration to keystone will be performed as a piece of middleware extending this and committed to the keystone repository). Also implements private images. No limited-visibility shared image support is provided yet * Take out extraneous comments; tune up doc string; rename image_visible() to is_image_visible(); log authorization failures * use runs_sql instead of hackery * Updating setup.py per bin/image_cache removal * Removing bin/image_cache directory * Removing cache enabled flag from most confs * Removing imagecache from default WSGI pipeline * Allow plugging in alternate context classes so the owner property and the image_visible() method can be overridden * Make a context property 'owner' that returns the tenant; this makes it possible to change the concept of ownership by using a different context object * Unit tests for the context's image_visible() routine * We don't really need elevate().. 
* Merging in adding_image_caching * Importing module rather than function * PEP 8 fixes * Adding reap stalled images * Returning number of files deleted by cache-clear * Returning num_reaped from reap_invalid * Moving bin to image_cache/ * Fixing comment * Adding reaper script * Adding percent done to incomplete and invalid image listing * Renaming tmp_path to incomplete_path * Renaming tmp_path to incomplete_path * Renaming purge_all clear, less elegant variation * Refactor to use lookup_command, so command map is used in one place * Refactoring to use same command map between functions * Renaming to cache-prefetching * Renaming to cache-prefetch * Renaming to cache-purge-all * Renaming to cache-purge * Renaming to cache-invalid * Beginning to normalize names * Refactoring out common code * Refactoring prefetch * Refactoring purge * Refactoring purge_all * Refactoring listing of prefetching images * Using querystring params for invalid images * Link incoming context with image owner for authorization decisions * How in the world did I manage to forget this? *sigh* * Make tests work again * merge trunk * pull-up from trunk * This patch: * PEP8 nit * Added fix for Bug #813291: POST to /images setting x-image-meta-id to an already existing image id causes a 500 error * One more try.. * Yet another attempt to fix URIs * Add in security context information * Moving cached image list to middleware * Initial work on moving cached_images to WSGI middleware * API is now returning a 409 error on duplicate POST. I also modified the testcase to expect a 409 response * Add owner to database schema * Fix URI parsing on MacOSX - Python 2.6.1 urlparse bugs * Namespacing xattr keys * PEP8 fixes * Added 3 tests in tests/functional/test_httplib2_api.py to validate is_public filtering works * left in 2 fixes.. 
removing redundant fix * If meta-data contains an id field, pass it to _image_update() * Adding functional test to show bug #813291 * fixed an inline comment * removed pprint import, and added check for other 3 images to make sure is_public=True * Added 3 tests to validate is_public filtering works * Completed rewrite of tests/functional/test_curl_api.py using httplib2 * Changes the default filtering of images to only show is_public to actually use a default filter instead of hard coding. This allows us to override the default behavior by passing in a new filter * removing pprint import * completed rewrite of test_ordered_images().. this completes rewrite of test_curl_api using httplib2 * test_ordered_images() missing closing self.stop_servers() * finished rewrite of test_filtered_images() * add tests and make None filters work * Change default is_public = True to just set a default filter instead of hard coding so it can be overridden * make the tests work with new trunk * merge trunk * Refactoring PrettyTable so it doesn't print the lines itself * Adding pruner and prefetcher to setup.py * Removing extraneous text * PEP 8 fixes * Adding prefetching list to bin/glance * More cleanups * Adding prefetching of images * Overhaul the way that the store URI works. 
We can now support specifying the authurls for Swift and S3 with either an http://, an https:// or no prefix at all * Typo fix * Removing test exception * PEP 8 fixes * Adding Error to invalid cache images * Show invalid images from bin/glance * Improving comments * Cleaning up cache write * Moving xattrs out to utils * Clip and justify columns for display * Including last accessed time in cached list * Adding more comments * Adding hit counter * Pruning invalid cache entries after grace period * Clear invalid images when purging all cached images * Rollback by moving images to invalid_path * Improving comments * PEP8 fixes * Adding cached image purge to bin/glance * Adding purge all to bin/glance * Adding catch_error decorator to bin/glance * Adding 'cached' command to bin/glance * Write incomplete files to tmp path * Adding purge_all, skip if set if xattrs arent supported * Adding purge cache API call * Adding API call to query for cache entries * Create bin/glance-pruner * Adding image_caching * rewrote test_traceback_not_consumed(), working on test_filtered_images() * Only changes is reverting the patch that added migration to configure_db() and resets the in-memory SQLite database as the one used in functional testing. Yamahata's commits were unmodified.. * Reverts commit that did db migration during configure_db() and makes functional tests use in-memory database again. The issues we were seeing had to do with the timeout not being long enough when starting servers with disk-based registry databases and migrate taking too long when spinning up the registry server... this was shown in almost random failures of tests saying failure to start servers. 
Rather than increase the timeout from 3 seconds, I reverted the change that runs migrate on every startup and cut the total test duration down about 15 seconds * merged glance trunk * updated Authors * Resolves bug lp:803260, by adding a check to ensure req.headers['Accept'] exists before it gets assigned to a variable * run_tests.py: make test runner accepts plugins * run_tests.py: make run_tests.py work * Fix the poor error handling uncovered through bug in nova * Added stop_servers() to the end of the test cases * adding testing & error handling for invalid markers * removed pprint import * removed extra space on test_queued_process_flow method definition * removing commented out line * merged in lp:~jshepher/glance/functional_tests_using_httplib2_part2 * applied requested fix in merge-prop * Removing ordering numbers from the test cases, per jay pipes * cleaning up the 'no accept headers' test cases. this should fail until Bug lp:803260 is resolved * Cleaning up docstring spacing * rewrite of test_size_greater_2G_mysql from test_curl_api.py using httplib2. All tests currently pass * completed rewrite of test_003_version_variations. bug lp:803260 filed about step #0, and noted as a comment in code * Fix for bug 803188. This branch also proposed for merging into trunk * miss-numbering of steps * fixing pep8 violation * Added a check to ensure req.headers['Accept'] exists before it gets assigned to a variable. All unit/functional tests pass with this patch * half way done with rewrite of test_003_version_variations.. 
step #0 causes a 500 error unless we supply an Accept header * Prevent query params from being set to None instead of a dict * removing rogue print * fixing issue where filters are set to None * Backport for bug 803055 * rewrote test_002_queued_process_flow from test_curl_api.py, all 6 steps pass against trunk revno:146 * Backport for bug 803055 * Prevent clients from adding query parameters set to None * ignores None param values passed to do_request * cleaning up docstrings * merging trunk * docstring * Added sort_key and sort_dir query params to apis and clients * fixing one last docstring * docstrings\! * unit/test_config.py: make it independent on sys.argv * run_tests.py: make test runner accepts plugins * reverting one import change; another docstring fix * docstring * Switch image_data to be a file-like object instead of bare string in image creating and updating Without this Glance loads all image into memory, then copies it one time, then writes it to temp file, and only after all this copies image to target repository * Add myself to Authors file * cleaning up None values being passed into images_get_all_public db call * adding base client module * restructuring client code * merging trunk * Explicitly set headers rather than add them * fixing httplib2 functional test that was expecting wrong content-type value * merging trunk * rewrite of test_get_head_simple_post from tests/functional/test_curl_api.py using httplib2 * adding assert to check content_type in GET /images/ test * Explicitly setting Content-Type, Content-Length, ETag, Location headers to prevent duplication * Bug #801703: No logging is configured for unit tests * Bug #801703: No logging is configured for unit tests * Change image_data to body_file instead of body * reset _MAKER every test and make sure to stop the servers * Trunk merge, changed returned content-type header from 'application/octet-stream' to 'text/html; charset=UTF-8, application/octet-stream' * yea python strings * updated 
main docstring, as it was directly coppied from test_curl_api.py * merged trunk * refactoring for Jay * make image data a constant * Fixes build failures due to webob upgrade. Updated pop-requires as well * upgrading webob and fixing tests * - refactoring wsgi code to divide deserialization, controller, serialization among different objects - Resource object acts as coordinator * updating client docs * fixing bad request error messages * making SUPPORTED_* lists into tuples * slight refactoring * updating docs * adding ordering support to glance api * adding support to registry server and client for sort_key and sort_dir params * re-ordered imports, using alpha-ordering * removing unnecessary unittest import * moved httplib2 tests to their own test case file, and uncommented md5 match * updating docs; adding support for status filter * adding query filters to bin/glance details * adding query filters to bin/glance index * forgot to remove pprint import * adding hashlib as a dependency to pip-requires (not 100% sure it is not part of the base install though) * fixed pep8 violation * rewote the test #7 - #11 for testcase (test_get_head_simple_post) * refactoring for Brian * refactoring from Rick's comments * Added httplib2 dependency to tools/pip-requires * rewriting functional tests to utilize httplib2 instead of curl * make sure it runs as a daemon for the tests * default to no daemon * also allow for daemon in the config file so that we can test it easier * default to non-daemon mode * change order of paramaters and make event optional * initial refactoring from Jay's comments * remove eventlet import and leftover function from previous refactoring * remove file that got resurrected by accident * fixed test case * add functional tests of the scrubber and delayed_delete * start the scrubber in addition to the api and registry * add glance-scrubber to glance-control * call it a Daemon, cuz it is * Update Authors * add the function to the stubs * cleanup * adding 
tests for wsgi module * removing rogue print * further refactoring * adding refactored wsgi code from nova; moving registry api to new wsgi * delayed scrubbing now works * add the scrubber startup script * remove unnecessary option * add pending_delete to stub api * pep8 fixed * pep8 fixes * pass in the type we want so it gets converted properly * self leaked ;( * only return the results that we need to act on * allow passing of time to get only results earlier than the time' * server and scrubber work * update the docstring to reflect current * pass in a wakeup_time for the default time between database hits * start making the server that will periodicly scrub * Config file for the scrubber. We make our own connection to the db here and bypass using the registry client so we don't have to expose non-public images over the http connection * make the commits * Add webob>=1.0.7 requirement to tools/pip-requires * all delayed deletes will be going through a new service, if delayed_delete is False, then delete it right away, otherwise set it to pending_delete * add scrub file * set the image to pending delete prior to scheduling the delete * refactor a bit so the db gets updated as needed and we only trigger the delay if the config option is set * add scheduled_delete_from_backend which delays the deletion of images for at least 1 second * don't delete directly but schedule deletion * add the api function to get the images that are pending deleteion * add in delayed delete options * Add workaround for Webob bug issue #12 and fix DELETE operation in S3 where URL parsing was broken * Add ability to create missing s3 bucket on first post, similar to Swift driver * Adding support for marker/limit query params from api, through registry client/api, and implementing at registry db api layer * Bug #787296: test_walk_versions fails with SQLalchemy 0.7 * OK, fixes the issue where older versions of webob.Request did not have the body_file_seekable attribute. 
After investigation, turned out that webob.Request.make_body_seekable() method was available in all versions of webob, so we use that instead * Added new disk_format type of 'iso'. Nova can use this information to identify images that have to be booted from a CDROM * adding marker & limit params to glance client * Auto-migrate if the tables don't exist yet * Fix up unit tests for S3 after note from Chris. Also fix bug when S3 test was skipped, was returning error by accident * * Adds functional test that works with Amazon S3 * Fixes parsing of "S3 URLs" which urlparse utterly barfs on because Amazon stupidly allows forward slashes in their secret keys * Update /etc/glance-api.conf for S3 settings * merging trunk, resolving conflicts * fixing sql query * completing marker functionality * Call stop_servers() for those 2 test cases missing it * Correct documentation * Add missing stop_servers() calls to two functional test cases * Remove changes to stub database * Auto-migrate if tables don't exist * Fix accidental delete * Remove additions to FIXTURES in test/stubs.py, which requried changes elsewhere * Sync with trunk * Documentation for new results filtering in the API and client * Fix tiny typo * Documentation for new results filtering in the API and client * Adding support for query filtering from the glance client library * renaming query_params to params * abstracting out filters query param serialization into BaseClient.do_request * renaming tests to resolve conflict * adding filters param to get_images and get_images_detailed in glance client * Bug #787296: test_walk_versions fails with SQLalchemy 0.7 * Updated doc with 'iso' disk_format * Update documentation * Adding support for api query filtering - equality testing on select attributes: name, status, container_format, disk_format - relative comparison of size attribute with size_min, size_max - equality testing on user-defined properties (preface property name with "property-" in query) * updating stubs 
with new sorting logic; updating tests * fixing some copy/paste errors * fixing some webob exceptions * slight modification to registry db api to ensure marker works correctly * slight refactoring per jaypipes' suggestions; sort on get images calls is now created_at desc * Add tests for 'iso' image type. Remove hard coding of next available image id in tests. This prevents new test images from being added to the set generated by tests.unit.stubs.FakeDatastore * pulling from parent branch * docstring fix * pushing marker/limit logic down into registry db api * adding support for marker & limit query params * removing some unnecessary imports * making registry db api filters more structured; adding in a bit of sqlalchemy code to filter image properties more efficiently * consolidating image_get_all_public and image_get_filtered in registry db api * adding test case for multiple parameters from command line * adding custom property api filtering * adding size_min and size_max api query filters * implemented api filtering on name, status, disk_format, and container_format * Adds versioning to the Glance API * Add test and fix for /v1.2/images not properly returning version choices * Add more tests for version URIs and accept headers and fix up some of Brian's review comments * Fix merge conflict.. * Changes versioned URIs to be /v1/ instead of /v1.0/ * Improve logging configuration docs.. * Doc and docstring fixes from Dan's review * Removed some test config files that slipped in.. * Fix up find_config_file() to accept an app_name arg. Update all documentation referencing config files * Fix pep8 complaint * Add DISK_FORMAT for 'iso' type images * Adds versioning to Glance's API * Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state * Fix numbering in comment.. * Fixed doh. 
Updates test case to test for condition that should have failed with status!='active' * Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state * Adding prefilled Authors, mailmap files Adding test to validate Authors file is properly set up * Documentation updates to make glance add command clearer, hopefully :) * adding Authors functionality; fixing one rogue pep8 violation * Improve logging configuration docs.. * Prevent users from uploading images with a bad or missing store. Allow deletion from registry when backend cannot be used * bcwaldon review fixups * adding comment * Fix for bug #768969: glance index shows non-active images; glance show does not show status * Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact * catching NotFound to prevent failure on bad location * Prevent requests with invalid store in location param * Allow registry deletion to succeed if store deletion fails * Documentation updates to make glance add command clearer, hopefully :) * Fix for LP Bug #768969 * Expanding user confirmation default behavior * removing excessive exception handling * pep8 fixes * docstring and exception handling * Expanding user_confirm default behavior * I modified documentation to show more first-time user friendly examples on using glance. With the previous examples, I followed it as a first-time user and had to spend more than necessary time to figure out how to use it. With this modification, other first-time users would make it work on their systems more quickly * - Require user confirmation for "bin/glance clear" and "bin/glance delete " - Allow for override with -f/--force command-line option * adding --force option to test_add_clear * Adds a test case for updating an image's Name attribute. 
glance update was not regarding 'name' as a top-level modifiable attribute.. * Name is an attribute that is modifiable in glance update, too. * Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command * Update tests and .bzrignore to use tests.sqlite instead of glance.sqlite * Only modify the connection URL in runs_sql if the original connection string starts with 'sqlite' * Create a decorator that handles setting the SQL store to a disk-based SQLite database when arbitrary SQL statements need to be run against the registry database during a test case * Docstring update on the run_sql_command function * Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command * Add log_file to example glance.conf * fixing spacing in help text * adding confirmation on image delete/clear; adding user_confirm functionality * Add log_file to example glance.conf * Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :( * Fixing tests. Sorry for late response * Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :( * resolve merge conflicts * chnaged output * Open Diablo release * Diablo versioning * Fake merge with ancient trunk. 
This is only so that people who "accidentally" have been following lp:~hudson-openstack/glance/trunk will not have problems updating to this * Final versioning for Cactus * fixing after review * Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly * RickH fixups from review * Add catch-all except: block in _upload() * change output from glance-registry * get latest from lp:glance * Ensures that configuration values for debug and verbose are used if command-line options are not set * Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly * Fix logging in swift * Fix Thierry's notice about switched debug and verbose * Change parsing of headers to accept 'True', 'on', 1 for boolean truth values * Final cactus versioning * OK, fix docs to make it clear that only the string 'true' is allowed for boolean headers. Add False-hood unit tests as well * Logging was not being setup with configuration file values for debug/verbose * Fix up the way the exception is raised from _safe_kill()... When I "fixed" bug 729726, I mistakenly used the traceback as the message. doh * Change parsing of headers to accept 'True', 'on', 1 for boolean truth values * Add the migration sql scripts to MANIFEST.in. The gets them included in not only the tarball, but also by setup.py install * Add the migration sql scripts to MANIFEST.in. The gets them included in not only the tarball, but also by setup.py install * Changed raise of exception to avoid displaying incorrect error message in _safe_kill() * fix logging in swift * Changes "key" column in image_properties to "name" * Updated properties should be marked as deleted=0. 
This allows previously deleted properties to be reactivated on an update * Adds --config-file option to common options processing * Update the docs in bin/glance so that help for the 'update' command states that metadata not specified will be deleted * Fix config test fixtures and pep8 error in bin/glance-manage * Provide revised schema and migration scripts for turning 'size' column in 'images' table to BIGINT. This overcomes a 2 gig limit on images sizes that can be downloaded from Glance * Updated properties should be marked as deleted=0. Add unit tests * Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435 * Change order of setting debug/verbose logging. Thanks for spotting this, Elgar * Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435 * Ensure we don't ask the backend store to delete an image if the image is in a queued or saving state, since clearly the backend state has yet to completely store the image * Changes "key" column in image_properties to "name" * Use logging module, not echo for logging SQLAlchemy * Updates glance-manage to use configuration files as well as command line options * Ensure we don't ask a backend store to delete an image if the image is queued or saving * Moved migration into Python script, otherwise PostgreSQL was not migrated. Added changes to the functional test base class to reset the data store between tests. GLANCE_SQL_CONNECTION env variable is now GLANCE_TEST_SQL_CONNECTION * changed to more typical examples * Add migration scripts for revising the datatype of the 'size' column in the images table * Changes to database schema required to support images larger than 2Gig on MySQL. Does not update the migration scripts * Updates to the Registry API such that only external requests to update image properties purge existing properties. 
The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally * Updates to the Registry API such that only external requests to update image properties purge existing properties. The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally * Update the glance registry so that it marks properties as deleted if they are no longer exist when images are updated * Simple one.. just add back the Changelog I removed by accident in r94. Fixes bug #742353 * Adds checksumming to Glance * Uhhhm, stop_servers() should stop servers, not start them! Thanks to Cory for uncovering this copy/paste fail * Fix up test case after merging in bug fixes from trunk... expected results were incorrect in curl test * Add ChangeLog back to MANIFEST.in * Add migration testing and migration for disk_format/container_format * tests.unit.test_misc.execute -> tests.utils.execute after merge * Allow someone to set the GLANCE_TEST_MIGRATIONS_CONF environment variable to override the config file to run for the migrations unit test: * Update the glance registry so that it marks properties as deleted if they are no longer in the update list * Start eventlet WSGI server with a logger to avoid stdout output * Adds robust functional testing to Glance * Add migration script for checksum column * Fixed an oops. Didn't realized Repository.latest returned a 0-based version number, and forgot to reversed() the downgrade test * OK, migrations are finally under control and properly tested * Remove non-existing files from MANIFEST.in * Removed glance-combined. 
Fixed README * Removed glance-commit * Re-raise _safe_kill() exception in non-3-arg form to avoid pep8 deprecation error * Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv * Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv * Removes glance-combined and fixes TypeError from bad function calls in glance-manage * Start eventlet WSGI server with a logger to avoid stdout output * Pass boolean values to glance.client as strings, not integers * Small adjustment on wait_for_servers()... fixed infinite loop possibility * Adds robust functional testing to Glance * Ensure Content-type set to application/octet-stream for GET /images/ * Ensure Content-Length sent for GET /images/ * HTTPBackend.get() needed options in kwargs * Remove glance-combined (use glance-control all start). Fix glance-manage to call the setup_logging() and add_logging_options() methods according to the way they are called in glance-api and glance-registry * Support account:user:key in Swift URIs. Adds unit tests for various calls to parse_swift_tokens() * Adds documentation on configuring logging and a unit test for checking simple log output * Support account:user:key in Swift URIs. Adds unit tests for various calls to parse_swift_tokens() * Cherry pick r86 from bug720816 * Cherry pick r87 from bug720816 * Fixed run_tests.py addError() method since I noted it was faulty in another branch.. 
* Tiny pep8'ers * I stole the colorized code from nova * Fix typo * A quick patch to allow running the test suite on an alternate db backend * Merged trunk -resolved conflicts * [Add] colorization stolen from nova * Don't require swift module for unit-tests * Pep8 fix * Backing out unit-test workaround * Changed to have 2 slashes * Allow unit-tests to run without swift module * Remove spurios comment in test file * Add Glance CLI tool * Silly mistake when resolving merge conflict...fixed * Fixes passing of None values in metadata by turning them into strings. Also fixes the passing of the deleted column by converting it to and from a bool. The test for passing metadata was updated to include these values * Adds documentation on configuring logging and a test that log_file works. It didn't, so this also inludes fixes for setting up log handling :) * fix data passing * add failing test for None and deleted * Uses logger instead of logging in migration.py * Using logger in migration api instead of logging directly * Only clean up in the cleanup method. Also, we don't need the separate URI now * Use unregister_models instead of os.unlink to clean up after ourselves * Fixed unregister_models to actually work * Fixed migration test to use a second DB URL * Replaced use of has_key with get + default value * Make it clear that the checksum is an MD5 checksum in docs * Adds checksumming to Glance * Whoops! Left out a self.db_path * Allow tests to run on an alternate dburi given via environment variables * Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module * Remove last vestiges of account in Swift store * Quick fixup on registry.get_client() * Public? => Public: per Cory's comment. 
Added a little more robust exception handling to some methods in bin/glance * Fixes for Devin and Rick's reviews * Adds disk_format and container_format to Image, and removes the type column * Fixes client update_image to work like create_image. Also fixes some messed up exceptions that were causing a try, except to reraise * Final review fixes. Makes disk_format and container_format optional. Makes glance-upload --type put the type in properties * remove test skip * Put account in glance.conf.sample's swift_store_auth_address, use real swift.common.client.ClientException, ensure tests work with older installed versions of Swift (which do not have, for example, swift.common.client.Connection.get_auth method) * Work around Eventlet exception clearing by memorizing exception context and re-raising using 3-arg form * Adds bin/glance to setup.py * Fixes from Rick's review #1 * Reverts Image `type` back to the old behavior of being nullable * Work around Eventlet exception clearing * Add sys.path mangling to glance-upload * Add sys.path adjustment magic to glance-upload * Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module * Couple tiny cleanups noticed when readin merge diff. * bin/glance-admin => bin/glance, since it's really just the CLI tool to interact with Glance. Added lots of documentation and more logging statements in some critical areas (like the glance.registry calls.. 
* Adds lots of unit tests for verifying exceptions are raised properly with invalid or mismatched disk and container formats * Makes --kernel and --ramdisk required arguments for glance-upload since Nova currently requires them * Removing image_type required behavior * Removing requirement to pass kernel and ramdisk * Add test cases for missing and invalid disk and container formats * Requiring kernel and ramdisk args in glance-upload * Make disk_format and container_format required * Make disk_format and container_format required * Adds an admin tool to Glance (bin/glance-admin) that allows a user to administer the Glance server: * Make sure validate_image() doesn't throw exception on missing status when updating image * Adds disk_format and container_format to Image, and removes the type column * This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server * Add debugging output to assert in test_misc. Trying to debug what Hudson fails on.. * Fixups from Rick's review * Removes now-unnecessary @validates decorator on model * I should probably rebase this commit considering all the previous commits weren't actually addressing the issue. The fact that I had glance-api and glance-registry installed on my local machine was causing the test runs to improperly return a passing result * Use Nova's path trick in all bins.. 
* Add path to glance-control * Removes image type validation in the Glance registry * Adding vhd as recognized image type * Reverting the removal of validation * Removing image type validation * Adds --pid-file option to bin/glance-control * Add %default for image type in glance-upload * Adds Location: header to return from API server for POST /images, per APP spec * Cleanups from Soren's review * Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library * Adds Location: header to return from API server for POST /images, per APP spec * This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server * Adds --pid-file option to bin/glance-control * Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library * Adds sql_idle_timeout to reestablish connections to database after given period of time * Add sql_idle_timeout * Removes lockfile and custom python-daemon server initialization in favour of paste.deploy * Review 3 fixups * Remove get_config_file_options() from glance-control * Fixes for Rick review #2 * Remove no-longer-needed imports.. * Remove extraneous debug import.. * Changes the server daemon programs to be configured only via paste.deploy configuration files. 
Removed ability to configure server options from CLI options when starting the servers with the exception of --verbose and --debug, which are useful during debugging * Adds glance-combined and glance-manage to setup.py * Fix merge conflicts * Adds glance-combined and glance-manage to setup.py * Fixes bug 714454 * ReStructure Text files need to end in .rst, not .py ;) * Update README, remove some vestigial directories, and other small tweaks * Removing dubious advice * Adds facilities for configuring Glance's servers via configuration files * Use fix_path on find_config_file() too * Fixups from Rick's review * Including tests/ in pep8 * Typo fixes, clarifying * Updating README, rmdir some empty dirs * Adds bin/glance-control program server daemonization wrapper program based on Swift's swift-init script * Ignore build and deploy-related files * Adds sqlalchemy migrations * Fix bug 712575. Make BASE = models.BASE * Make sure BASE is the models.BASE, not a new declarative_base() object * Had to reverse search order of directories for finding config files * Removes lockfile and custom python-daemon server initialization in favour of paste.deploy * Adds facilities for configuring Glance's servers via configuration files * Creating indexes * Adding migration test * Fixing migration import errors * Small cleanups * glance-manage uses common options * Merging in glance/cactus * Pep8 fix * Pep8 fixes * Refactoring into option groups * Hopefully-final versioning (0.1.7), no review needed * Final versioning, no review needed * Adding db_sync to mirror nova * Adding some basic documentation * Better logging * Adding image_properties migration * Adding migration for images table * Adding migration management commands * Remove debugging output that wasn't supposed to go into this branch (yet) :) * Adds --debug option for DEBUG-level logging. --verbose now only outputs INFO-level log records * Typo add_option -> add_options * Fixes from Rick's review. 
Thanks, Rick * Adds --sql-connection option * First round of logging functionality: * Merged use-optparse * Removes glance.common.db.sqlalchemy and moves registration of models and create_engine into glance.registry.db.api * pep8-er in bin/glance-combined * Fixes lp710789 - use-optparse breaks daemonized process stop * Adds bin/glance-combined. Useful in testing.. * Tiny pep8 fixup in setup.py * Rework what comes back from parse_options()[0] to not stringify option values. Keep them typed * Remove use of gflags entirely. Use optparse * Removing unecessary param to get_all_public * Merging trunk * Adding back some missing code * Cleaning up some code * Makes Glance's versioning non-static. Uses Nova's versioning scheme * Adds/updates the copyright info on most of the files in glance and copies over the Authors check from Nova * Removing sqlalchemy dir * Removed methods from sqlalchemy/api * Refactor update/create * Messed up a permission somehow * Refactoring destroy * feh * A few more * A few more I missed * version bumped after tarball cut. no review needed.. * Bump version * Removing authors test for now * PEP8 cleanup * PEP8 cleanup * Should fix the sphinx issue * Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs * Make sphinx conditional * bumps version after tarball release of 0.1.4 * Bump version * Added bzr to pip-requires and refixed some pep8 stuff * Authors check * A few more copyrights * Copyright year change * Pylint cleanup * Added copyright info * Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs * bumps release version. ready for Bexar final release * Version bump after release * added sphinx and argparse into tools/pip-requires so that setup.py works. 
this bug also prevents nova from creating a virtualenv * fixes setup install pip dependencies * Version bump for release * Fixes bug #706636: Make sure pep8 failures will return failure for run_tests.sh * Make run_tests.sh return failure when pep8 returns fail, and fix the pep8 error in /bin/glance-upload * This patch: * Converts dashes to underscores when extracting image-properties from HTTP headers (we already do this for 'regular' image attributes * Update image_properties on image PUTs rather than trying to create dups * This patch replaces some remaining references to req.body (which buffers the entire request body into memory!) with the util.has_body method which can determine whether a body is present without reading any of it into memory * Adding Apache license, fixing long line * Making glance-upload a first-class binary * Revove useless test_data.py file, add image uploader * Fix property create * Dont buffer entire image stream on PUT * Adds man pages for glance-registry and glance-api programs. Adds Getting Started guide to the Glance documentation * Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client * Duh, it helps to import the class you are inheriting from... * OK, found a solution to our test or functional dilemma. w00t * Make compat with chunked transfer * Removes the last vestiges of Twisted from Glance * Pull in typo fix * Add in manpage installation hook. Thanks Soren :) * Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client * Removes Twisted from tools/install_venv.py and zope.interface from tools/pip-requires. Shaved a full 45 seconds for me off of run_tests.sh -V -f now we're not downloading a giant Twisted tarball.. * Remove last little vestiges of twisted * Quick typo fix in docs * Add run_tests.py to tarball * Also include run_tests.py in tarball * Adds man pages for glance-registry and glance-api. 
Adds Getting Started guide to Glance docs * Fixes bug #696375: x-image-meta-size not optional despite documentation saying so * PEP8 fixes in /glance/store/__init__.py * Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 * Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 * upgrade version.. * Fixes Bug#696375: x-image-meta-size is not optional, contrary to documentation * Increase version after release * Cut 0.1.2 * Files missing from the tarball (and you probably need to cut a 0.1.2.) * Cleanup of RST documentation and addition of docs on an image's status * Include some files that were left out * Implements the S3 store to the level of the swift store * fixes bug698318 * Fixes suggested by JayPipes review. Did not modify docstrings in non-related files * This merge is in conjunction with lp:~rconradharris/nova/xs-snap-return-image-id-before-snapshot * Updating docs * Merging trunk * Clean up the rest of Glance's PEP8 problems * PEP-8 Fixes * Fixing eventlet-raise issue * Bug #698316: Glance reads the whole image into memory when handling a POST /images request * Merging trunk * Fixed pylint/pep8 for glance.store.s3 * Implement S3 to the level of swift * removing old methods * refactoring so update can take image_data * More PEP8 fixes * Fix all Glance's pep8 problems * Remove incorrect doccomments about there being a default for the host parameter, fix misdocumented default port, and remove handling of missing parameters in BaseClient, because the values are always specified by the subclass's __init__ * Bug #696385: Glance is not pep8-clean * Bug #696382: Glance client parameter defaults misdocumented * Fixes a number of things that came up during initial coding of the admin tool: * Made review changes from Rick * Duh, use_ssl should not use HTTPConnection.. 
* Remove final debugging statement * merge trunk * Remove debugging statements * Fixes a number of things that came up during initial coding of the admin tool: * fix bug 694382 * Bug #694382: setup.py refers to parallax-server and teller-server, when these have been renamed * documentation cleanup and matching to other OpenStack projects. Glance is no longer the red-headed documentation stepchild in OpenStack.. * Converts timestamp attributes to datetime objects before persisting * Adding __protected_attributes__, some PEP8 cleanups * review fixes * Update sphinx conf to match other OpenStack projects * Documentation cleanup. Splits out index.rst into multiple section docs * Converting to datetime before saving image * Enhances POST /images call to, you know, actually make it work.. * Make directory for filesystem backend * doing the merge of this again...somehow the trunk branch never got rev26 :( * Adds POST /images work that saves image data to a store backend * Update docs for adding image.. * Fix Chris minor nit on docstring * Fixes binaries, updates WSGI file to more recent version from Nova, and fixes an issue in SQLAlchemy API that was being hidden by stubs and only showed up when starting up the actual binaries and testing.. * Major refactoring.. 
* Fix testing/debug left in * Fixes from review * Documentation updates and GlanceClient -> Client * Refactor a bunch of stuff around the image files collection * Cleanup around x-image-meta and x-image-meta-property HTTP headers in GET/HEAD * Update /glance/client.py to have GlanceClient do all operations that RegistryClient does * Merges Glance API with the registry API: * Makes HEAD /images/ return metadata in headers * Make GET /images/ return image data with metadata in headers Updates docs some (more needed) * Second step in simplifying the Glance API * This is the first part of simplifying the Glance API and consolidating the Teller and Parallax APIs into a single, unified Glance API * Adds DELETE call to Teller API * Fixes Swift URL Parsing in Python 2.6.5 by adding back netloc * Moving imports into main which will only be executed after we daemonize thus avoiding the premature initialization of epoll * Delaying eventlet import until after daemonization * Fix Swift URL parsing for Python 2.6.5 * Don't leak implementation details in Swift backend. Return None on successful delete_object call * Adds call to Swift's DELETE * Typo fixed and tiny cleanups * Adds DELETE to Teller's API * Just some small cleanups, fixing: * Swapped port numbers (Parallax Port <=> Teller port) * Removing extraneous routes in Teller API * Adding required slashes to do_request * * Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". This hides the URI stuff behind the API layer in communication between Parallax and Teller. * Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data * Fixing swapped port numbers, removing extraneous routes in Teller controller, adding required slash for do_request calls * * Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". 
This hides the URI stuff behind the API layer in communication between Parallax and Teller. * Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data * Add files attribute to Parallax client tests * Adds client classes for Parallax and Teller and fixes some issues where our controller was not returning proper HTTP response codes on errors.. * Cleanup/fixes for Rick review * Adds client classes ParallaxClient and (stubbed) TellerClient to new glance.client module * packaging fixups preparing for release candidate * Remove symlinks in bin/ * Packaging fixups * awesomeness. merging into trunk since my parallax-api is already in trunk I believe. :) * Moving ATTR helpers into db module * PUTing and POSTing using image key * Quick fix...gives base Model an update() method to make it behave like a dict * Make returned mapping have an 'image' key to help in XML serialization * Ignore virtualenv directory in bzr * This patch removes unique index on the 'key' column of image_metadatum and replaces it with a compound UniqueConstraint on 'image_id' and 'key'. The 'key' column remains indexed * Fixes lp653358 * Renaming is_cloudfiles_available -> is_swift_available * Adds compound unique constraint to ImageMetadatum * Using swift.common.client rather than python-cloudfiles in Teller's Swift backend * Adds DELETE to the Parallax REST API * Implements the REST call for updating image metadata in the Parallax API * Implements Parallax API call to register a new image * Adds a /images/detail route to the Parallax controller, adds a unit test for it, and cleans up Michael's suggestions * Works around non-RFC compliance in Python (< 2.6.5) urlparse library * Workaround for bug in Python 2.6.1 urlparse library * Adds tests for bad status set on image * Implements Parallax API call to register a new image * This patch overhauls the testing in Glance: * unittest2 -> unittest. 
For now, since not using unittest2 features yet * Fixes up test_teller_api.py to use stubout correctly. Fixes a few bugs that showed up in the process, and remove the now-unnecessary FakeParallaxAdapter * First round of cleaning up the unittests. Adds test suite runner, support for virtualenv setup and library dependencies, resolves issues with ImportErrors on cloudfiles, adds pymox/stubout support and splits the backend testing into distinct unittest cases * With this patch Parallax and teller now work end-to-end with the Swift backend * Adding missing backend files, fixing typos in comments * This patch: * Decouples Controller for ParallaxAdapter implementation by adding generic RegistryAdapter and providing a lookup function * Adds base model attributes to Parallax's JSON (created_at, etc) * Improving symmetry between teller and parallax * Fixing swift authurl * Add RegistryAdapter, include ModelBase attributes * Fixing Teller image tests * Created teller-server.py in bin/ * Cleaning up Teller backend * Rewrote ImageController to inherit from the work Rick Harris did in glance.common. Moved it into teller/api/images.py to make teller match parallax. Fixed tests. 
Renamed them to distinguish if any parallax tests ever get written * Adding Image index call, nesting the Image show dict to facilitate XML serialization * Moving parallax models out of common and into the parallax module * Updated tests * Reimplements server.py as a wsgi api inheriting from glance.common * This patch: * pulls in a number of useful libraries from Nova under the common/ path (we can factor those out to a shared library in Bexar-release) * Defines the models in common.db.sqlalchemy.models.py (this should be factored out into the parallax package soon) * Adds the parallax api-server under /bin (if PyPI was used to pull python-daemon and python-lockfile, you may need to apply a patch I have against it) * Changes the obj['uri'] to obj['location'] to better sync with the representation within Nova. Adds the image_lookup_fn = ParallaxAdapter.lookup to teller.server * ImageChunk -> ImageFile, merging APIRouter into API for now * Adding Apache header to test_data.py * Small cleanups * Parallax will return obj['location'] instead of obj['uri'], also maybe a parallax lookup fn would be nice? * Implements a Parallax adapter for looking up images requested from nova. Adds a size check to SwiftBackend to ensure that the chunks haven't been truncated or anything * Reconciling parallax modifications with modulization of glance * Adding Images controller * Adding API directory and server.py * Modulify the imports * Implements Parallax adapter for lookups from Teller, also adds size expectations to the backend adapters * Adding files from Nova * Makes glance a module, containing teller and parallax sub-modules * libify glance into teller and parallax modules. Make nosetests work by making tests and tests/unit/ into packages * Rearranged the code a little. Added a setup.py. 
Added sphinx doc skeleton * Added setup.py and sphinx docs * Reorg to make Monty's build pedanticness side happier * Implements Swift backend for teller * ignore all .pyc files * Merging ricks changes * Adding basic image controller and mock backends * Adding description of registry data structure * Adding teller_server * adding filesystem and http backends * Initial check-in glance-12.0.0/PKG-INFO0000664000567000056710000000212712701407204015260 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: glance Version: 12.0.0 Summary: OpenStack Image Service Home-page: http://docs.openstack.org/developer/glance/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== Glance ====== Glance is a project that defines services for discovering, registering, retrieving and storing virtual machine images. Use the following resources to learn more: * `Official Glance documentation `_ * `Official Client documentation `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 glance-12.0.0/rally-jobs/0000775000567000056710000000000012701407204016237 5ustar jenkinsjenkins00000000000000glance-12.0.0/rally-jobs/glance.yaml0000664000567000056710000000206612701407047020365 0ustar jenkinsjenkins00000000000000--- GlanceImages.create_and_list_image: - args: image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" container_format: "bare" disk_format: "qcow2" runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 1 users_per_tenant: 1 GlanceImages.create_and_delete_image: - args: image_location: 
"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" container_format: "bare" disk_format: "qcow2" runner: type: "constant" times: 20 concurrency: 5 context: users: tenants: 5 users_per_tenant: 2 GlancePlugin.create_and_list: - args: image_location: "~/.rally/extra/fake.img" container_format: "bare" disk_format: "qcow2" runner: type: "constant" times: 700 concurrency: 7 context: users: tenants: 1 users_per_tenant: 1 glance-12.0.0/rally-jobs/plugins/0000775000567000056710000000000012701407204017720 5ustar jenkinsjenkins00000000000000glance-12.0.0/rally-jobs/plugins/plugin_sample.py0000664000567000056710000000617412701407047023146 0ustar jenkinsjenkins00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Sample of plugin for Glance. For more Glance related benchmarks take a look here: github.com/openstack/rally/tree/master/samples/tasks/scenarios/glance About plugins: https://rally.readthedocs.org/en/latest/plugins.html Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts """ import os from rally.plugins.openstack import scenario from rally.task import atomic from rally.task import utils class GlancePlugin(scenario.OpenStackScenario): @atomic.action_timer("glance.create_image_label") def _create_image(self, image_name, container_format, image_location, disk_format, **kwargs): """Create a new image. 
:param image_name: String used to name the image :param container_format: Container format of image. Acceptable formats: ami, ari, aki, bare, ovf, and docker. :param image_location: image file location used to upload :param disk_format: Disk format of image. Acceptable formats: ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso. :param **kwargs: optional parameters to create image returns: object of image """ kw = { "name": image_name, "container_format": container_format, "disk_format": disk_format, } kw.update(kwargs) try: if os.path.isfile(os.path.expanduser(image_location)): kw["data"] = open(os.path.expanduser(image_location)) else: kw["copy_from"] = image_location image = self.clients("glance").images.create(**kw) image = utils.wait_for(image, is_ready=utils.resource_is("active"), update_resource=utils.get_from_manager(), timeout=100, check_interval=0.5) finally: if "data" in kw: kw["data"].close() return image @atomic.action_timer("glance.list_images_label") def _list_images(self): return list(self.clients("glance").images.list()) @scenario.configure(context={"cleanup": ["glance"]}) def create_and_list(self, container_format, image_location, disk_format, **kwargs): self._create_image(self.generate_random_name(), container_format, image_location, disk_format, **kwargs) self._list_images() glance-12.0.0/rally-jobs/plugins/README.rst0000664000567000056710000000060512701407047021415 0ustar jenkinsjenkins00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. 
Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators.glance-12.0.0/rally-jobs/extra/0000775000567000056710000000000012701407204017362 5ustar jenkinsjenkins00000000000000glance-12.0.0/rally-jobs/extra/fake.img0000664000567000056710000000000012701407047020761 0ustar jenkinsjenkins00000000000000glance-12.0.0/rally-jobs/extra/README.rst0000664000567000056710000000025412701407047021057 0ustar jenkinsjenkins00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. Files will be in ~/.rally/extra/* glance-12.0.0/rally-jobs/README.rst0000664000567000056710000000176112701407047017740 0ustar jenkinsjenkins00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
Files will be located in ~/.rally/extra/* * glance.yaml is a task that is run in gates against OpenStack (nova network) deployed by DevStack Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins glance-12.0.0/MANIFEST.in0000664000567000056710000000071412701407047015726 0ustar jenkinsjenkins00000000000000include run_tests.sh ChangeLog include README.rst builddeb.sh include MANIFEST.in pylintrc include AUTHORS include run_tests.py include HACKING.rst include LICENSE include ChangeLog include babel.cfg tox.ini include glance/db/sqlalchemy/migrate_repo/README include glance/db/sqlalchemy/migrate_repo/migrate.cfg include glance/db/sqlalchemy/migrate_repo/versions/*.sql graft doc graft etc graft glance/locale graft glance/tests graft tools global-exclude *.pyc glance-12.0.0/glance.egg-info/0000775000567000056710000000000012701407204017104 5ustar jenkinsjenkins00000000000000glance-12.0.0/glance.egg-info/requires.txt0000664000567000056710000000162412701407203021506 0ustar jenkinsjenkins00000000000000pbr>=1.6 SQLAlchemy<1.1.0,>=1.0.10 eventlet!=0.18.3,>=0.18.2 PasteDeploy>=1.5.0 WebOb>=1.2.3 sqlalchemy-migrate>=0.9.6 httplib2>=0.7.5 pycrypto>=2.6 oslo.config>=3.7.0 oslo.concurrency>=3.5.0 oslo.context>=0.2.0 oslo.service>=1.0.0 oslo.utils>=3.5.0 stevedore>=1.5.0 futurist>=0.11.0 taskflow>=1.26.0 keystoneauth1>=2.1.0 keystonemiddleware!=4.1.0,>=4.0.0 WSME>=0.8 PrettyTable<0.8,>=0.7 Paste jsonschema!=2.5.0,<3.0.0,>=2.0.0 python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 pyOpenSSL>=0.14 six>=1.9.0 oslo.db>=4.1.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.messaging>=4.0.0 oslo.middleware>=3.0.0 oslo.policy>=0.5.0 oslo.serialization>=1.10.0 
retrying!=1.3.0,>=1.2.3 osprofiler>=1.1.0 glance-store>=0.13.0 semantic-version>=2.3.1 castellan>=0.3.1 cryptography>=1.0 debtcollector>=1.2.0 iso8601>=0.1.9 monotonic>=0.6 [:(python_version!='2.7')] Routes!=2.0,>=1.12.3 [:(python_version=='2.7')] Routes!=2.0,!=2.1,>=1.12.3 glance-12.0.0/glance.egg-info/dependency_links.txt0000664000567000056710000000000112701407203023151 0ustar jenkinsjenkins00000000000000 glance-12.0.0/glance.egg-info/not-zip-safe0000664000567000056710000000000112701407165021340 0ustar jenkinsjenkins00000000000000 glance-12.0.0/glance.egg-info/SOURCES.txt0000664000567000056710000005474312701407204021005 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst babel.cfg bandit.yaml pylintrc requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini doc/source/architecture.rst doc/source/artifact-types.rst doc/source/authentication.rst doc/source/cache.rst doc/source/common-image-properties.rst doc/source/conf.py doc/source/configuring.rst doc/source/controllingservers.rst doc/source/database_architecture.rst doc/source/db.rst doc/source/domain_implementation.rst doc/source/domain_model.rst doc/source/flows.rst doc/source/formats.rst doc/source/glanceapi.rst doc/source/glanceclient.rst doc/source/glancemetadefcatalogapi.rst doc/source/identifiers.rst doc/source/image-location-strategy-modules.rst doc/source/index.rst doc/source/installing.rst doc/source/metadefs-concepts.rst doc/source/notifications.rst doc/source/policies.rst doc/source/property-protections.rst doc/source/statuses.rst doc/source/tasks.rst doc/source/_static/.placeholder doc/source/contributing/blueprints.rst doc/source/contributing/index.rst doc/source/images/architecture.png doc/source/images/glance_db.png doc/source/images/glance_layers.png doc/source/images/image_status_transition.png doc/source/images_src/architecture.graphml doc/source/images_src/glance_db.graphml 
doc/source/images_src/glance_layers.graphml doc/source/images_src/image_status_transition.dot doc/source/man/footer.rst doc/source/man/general_options.rst doc/source/man/glanceapi.rst doc/source/man/glancecachecleaner.rst doc/source/man/glancecachemanage.rst doc/source/man/glancecacheprefetcher.rst doc/source/man/glancecachepruner.rst doc/source/man/glancecontrol.rst doc/source/man/glancemanage.rst doc/source/man/glanceregistry.rst doc/source/man/glancereplicator.rst doc/source/man/glancescrubber.rst doc/source/man/openstack_options.rst doc/source/opts/glance_api.rst doc/source/opts/glance_cache.rst doc/source/opts/glance_manage.rst doc/source/opts/glance_registry.rst doc/source/opts/glance_scrubber.rst doc/source/opts/index.rst etc/glance-api-paste.ini etc/glance-api.conf etc/glance-cache.conf etc/glance-glare-paste.ini etc/glance-glare.conf etc/glance-manage.conf etc/glance-registry-paste.ini etc/glance-registry.conf etc/glance-scrubber.conf etc/glance-swift.conf.sample etc/ovf-metadata.json.sample etc/policy.json etc/property-protections-policies.conf.sample etc/property-protections-roles.conf.sample etc/schema-image.json etc/metadefs/README etc/metadefs/cim-processor-allocation-setting-data.json etc/metadefs/cim-resource-allocation-setting-data.json etc/metadefs/cim-storage-allocation-setting-data.json etc/metadefs/cim-virtual-system-setting-data.json etc/metadefs/compute-aggr-disk-filter.json etc/metadefs/compute-aggr-iops-filter.json etc/metadefs/compute-aggr-num-instances.json etc/metadefs/compute-cpu-pinning.json etc/metadefs/compute-guest-memory-backing.json etc/metadefs/compute-guest-shutdown.json etc/metadefs/compute-host-capabilities.json etc/metadefs/compute-hypervisor.json etc/metadefs/compute-instance-data.json etc/metadefs/compute-libvirt-image.json etc/metadefs/compute-libvirt.json etc/metadefs/compute-quota.json etc/metadefs/compute-randomgen.json etc/metadefs/compute-trust.json etc/metadefs/compute-vcputopology.json 
etc/metadefs/compute-vmware-flavor.json etc/metadefs/compute-vmware-quota-flavor.json etc/metadefs/compute-vmware.json etc/metadefs/compute-watchdog.json etc/metadefs/compute-xenapi.json etc/metadefs/glance-common-image-props.json etc/metadefs/operating-system.json etc/metadefs/software-databases.json etc/metadefs/software-runtimes.json etc/metadefs/software-webservers.json etc/metadefs/storage-volume-type.json etc/oslo-config-generator/glance-api.conf etc/oslo-config-generator/glance-cache.conf etc/oslo-config-generator/glance-glare.conf etc/oslo-config-generator/glance-manage.conf etc/oslo-config-generator/glance-registry.conf etc/oslo-config-generator/glance-scrubber.conf glance/__init__.py glance/context.py glance/gateway.py glance/i18n.py glance/location.py glance/notifier.py glance/opts.py glance/schema.py glance/scrubber.py glance/version.py glance.egg-info/PKG-INFO glance.egg-info/SOURCES.txt glance.egg-info/dependency_links.txt glance.egg-info/entry_points.txt glance.egg-info/not-zip-safe glance.egg-info/pbr.json glance.egg-info/requires.txt glance.egg-info/top_level.txt glance/api/__init__.py glance/api/authorization.py glance/api/cached_images.py glance/api/common.py glance/api/policy.py glance/api/property_protections.py glance/api/versions.py glance/api/glare/__init__.py glance/api/glare/versions.py glance/api/glare/v0_1/__init__.py glance/api/glare/v0_1/glare.py glance/api/glare/v0_1/router.py glance/api/middleware/__init__.py glance/api/middleware/cache.py glance/api/middleware/cache_manage.py glance/api/middleware/context.py glance/api/middleware/gzip.py glance/api/middleware/version_negotiation.py glance/api/v1/__init__.py glance/api/v1/controller.py glance/api/v1/filters.py glance/api/v1/images.py glance/api/v1/members.py glance/api/v1/router.py glance/api/v1/upload_utils.py glance/api/v2/__init__.py glance/api/v2/image_actions.py glance/api/v2/image_data.py glance/api/v2/image_members.py glance/api/v2/image_tags.py glance/api/v2/images.py 
glance/api/v2/metadef_namespaces.py glance/api/v2/metadef_objects.py glance/api/v2/metadef_properties.py glance/api/v2/metadef_resource_types.py glance/api/v2/metadef_tags.py glance/api/v2/router.py glance/api/v2/schemas.py glance/api/v2/tasks.py glance/api/v2/model/__init__.py glance/api/v2/model/metadef_namespace.py glance/api/v2/model/metadef_object.py glance/api/v2/model/metadef_property_item_type.py glance/api/v2/model/metadef_property_type.py glance/api/v2/model/metadef_resource_type.py glance/api/v2/model/metadef_tag.py glance/api/v3/__init__.py glance/api/v3/router.py glance/async/__init__.py glance/async/taskflow_executor.py glance/async/utils.py glance/async/flows/__init__.py glance/async/flows/base_import.py glance/async/flows/convert.py glance/async/flows/introspect.py glance/async/flows/ovf_process.py glance/cmd/__init__.py glance/cmd/api.py glance/cmd/cache_cleaner.py glance/cmd/cache_manage.py glance/cmd/cache_prefetcher.py glance/cmd/cache_pruner.py glance/cmd/control.py glance/cmd/glare.py glance/cmd/manage.py glance/cmd/registry.py glance/cmd/replicator.py glance/cmd/scrubber.py glance/common/__init__.py glance/common/auth.py glance/common/client.py glance/common/config.py glance/common/crypt.py glance/common/exception.py glance/common/jsonpatchvalidator.py glance/common/property_utils.py glance/common/rpc.py glance/common/semver_db.py glance/common/signature_utils.py glance/common/store_utils.py glance/common/swift_store_utils.py glance/common/timeutils.py glance/common/trust_auth.py glance/common/utils.py glance/common/wsgi.py glance/common/wsme_utils.py glance/common/glare/__init__.py glance/common/glare/declarative.py glance/common/glare/definitions.py glance/common/glare/loader.py glance/common/glare/serialization.py glance/common/location_strategy/__init__.py glance/common/location_strategy/location_order.py glance/common/location_strategy/store_type.py glance/common/scripts/__init__.py glance/common/scripts/utils.py 
glance/common/scripts/image_import/__init__.py glance/common/scripts/image_import/main.py glance/contrib/__init__.py glance/contrib/plugins/__init__.py glance/contrib/plugins/artifacts_sample/__init__.py glance/contrib/plugins/artifacts_sample/base.py glance/contrib/plugins/artifacts_sample/setup.cfg glance/contrib/plugins/artifacts_sample/setup.py glance/contrib/plugins/artifacts_sample/v1/__init__.py glance/contrib/plugins/artifacts_sample/v1/artifact.py glance/contrib/plugins/artifacts_sample/v2/__init__.py glance/contrib/plugins/artifacts_sample/v2/artifact.py glance/contrib/plugins/image_artifact/__init__.py glance/contrib/plugins/image_artifact/requirements.txt glance/contrib/plugins/image_artifact/setup.cfg glance/contrib/plugins/image_artifact/setup.py glance/contrib/plugins/image_artifact/version_selector.py glance/contrib/plugins/image_artifact/v1/__init__.py glance/contrib/plugins/image_artifact/v1/image.py glance/contrib/plugins/image_artifact/v1_1/__init__.py glance/contrib/plugins/image_artifact/v1_1/image.py glance/contrib/plugins/image_artifact/v2/__init__.py glance/contrib/plugins/image_artifact/v2/image.py glance/db/__init__.py glance/db/metadata.py glance/db/migration.py glance/db/registry/__init__.py glance/db/registry/api.py glance/db/simple/__init__.py glance/db/simple/api.py glance/db/sqlalchemy/__init__.py glance/db/sqlalchemy/api.py glance/db/sqlalchemy/glare.py glance/db/sqlalchemy/metadata.py glance/db/sqlalchemy/models.py glance/db/sqlalchemy/models_glare.py glance/db/sqlalchemy/models_metadef.py glance/db/sqlalchemy/metadef_api/__init__.py glance/db/sqlalchemy/metadef_api/namespace.py glance/db/sqlalchemy/metadef_api/object.py glance/db/sqlalchemy/metadef_api/property.py glance/db/sqlalchemy/metadef_api/resource_type.py glance/db/sqlalchemy/metadef_api/resource_type_association.py glance/db/sqlalchemy/metadef_api/tag.py glance/db/sqlalchemy/metadef_api/utils.py glance/db/sqlalchemy/migrate_repo/README 
glance/db/sqlalchemy/migrate_repo/__init__.py glance/db/sqlalchemy/migrate_repo/manage.py glance/db/sqlalchemy/migrate_repo/migrate.cfg glance/db/sqlalchemy/migrate_repo/schema.py glance/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py glance/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py glance/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql glance/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py glance/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py glance/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py glance/db/sqlalchemy/migrate_repo/versions/006_mysql_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql glance/db/sqlalchemy/migrate_repo/versions/007_add_owner.py glance/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py glance/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py glance/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py glance/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql glance/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py glance/db/sqlalchemy/migrate_repo/versions/013_add_protected.py glance/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py glance/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py glance/db/sqlalchemy/migrate_repo/versions/016_sqlite_downgrade.sql 
glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py glance/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py glance/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py glance/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py glance/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py glance/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py glance/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py glance/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py glance/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py glance/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_downgrade.sql glance/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql glance/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py glance/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py glance/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py 
glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py glance/db/sqlalchemy/migrate_repo/versions/043_add_image_created_updated_idx.py glance/db/sqlalchemy/migrate_repo/versions/044_update_metadef_os_nova_server.py glance/db/sqlalchemy/migrate_repo/versions/__init__.py glance/domain/__init__.py glance/domain/proxy.py glance/glare/__init__.py glance/glare/dependency.py glance/glare/gateway.py glance/glare/location.py glance/glare/updater.py glance/glare/domain/__init__.py glance/glare/domain/proxy.py glance/hacking/__init__.py glance/hacking/checks.py glance/image_cache/__init__.py glance/image_cache/base.py glance/image_cache/cleaner.py glance/image_cache/client.py glance/image_cache/prefetcher.py glance/image_cache/pruner.py glance/image_cache/drivers/__init__.py glance/image_cache/drivers/base.py glance/image_cache/drivers/sqlite.py glance/image_cache/drivers/xattr.py glance/locale/glance-log-error.pot glance/locale/glance-log-info.pot glance/locale/glance-log-warning.pot glance/locale/glance.pot glance/locale/de/LC_MESSAGES/glance.po glance/locale/en_GB/LC_MESSAGES/glance-log-info.po glance/locale/es/LC_MESSAGES/glance-log-error.po glance/locale/es/LC_MESSAGES/glance-log-info.po glance/locale/es/LC_MESSAGES/glance-log-warning.po glance/locale/es/LC_MESSAGES/glance.po glance/locale/fr/LC_MESSAGES/glance.po glance/locale/it/LC_MESSAGES/glance.po glance/locale/ja/LC_MESSAGES/glance.po glance/locale/ko_KR/LC_MESSAGES/glance-log-error.po glance/locale/ko_KR/LC_MESSAGES/glance-log-info.po glance/locale/ko_KR/LC_MESSAGES/glance-log-warning.po glance/locale/ko_KR/LC_MESSAGES/glance.po glance/locale/pt_BR/LC_MESSAGES/glance-log-info.po glance/locale/pt_BR/LC_MESSAGES/glance-log-warning.po glance/locale/pt_BR/LC_MESSAGES/glance.po glance/locale/ru/LC_MESSAGES/glance.po glance/locale/tr_TR/LC_MESSAGES/glance-log-error.po glance/locale/tr_TR/LC_MESSAGES/glance-log-info.po glance/locale/tr_TR/LC_MESSAGES/glance-log-warning.po 
glance/locale/tr_TR/LC_MESSAGES/glance.po glance/locale/zh_CN/LC_MESSAGES/glance.po glance/locale/zh_TW/LC_MESSAGES/glance.po glance/quota/__init__.py glance/registry/__init__.py glance/registry/api/__init__.py glance/registry/api/v1/__init__.py glance/registry/api/v1/images.py glance/registry/api/v1/members.py glance/registry/api/v2/__init__.py glance/registry/api/v2/rpc.py glance/registry/client/__init__.py glance/registry/client/v1/__init__.py glance/registry/client/v1/api.py glance/registry/client/v1/client.py glance/registry/client/v2/__init__.py glance/registry/client/v2/api.py glance/registry/client/v2/client.py glance/tests/__init__.py glance/tests/stubs.py glance/tests/test_hacking.py glance/tests/utils.py glance/tests/etc/glance-swift.conf glance/tests/etc/policy.json glance/tests/etc/property-protections-policies.conf glance/tests/etc/property-protections.conf glance/tests/etc/schema-image.json glance/tests/functional/__init__.py glance/tests/functional/store_utils.py glance/tests/functional/test_api.py glance/tests/functional/test_bin_glance_cache_manage.py glance/tests/functional/test_cache_middleware.py glance/tests/functional/test_client_exceptions.py glance/tests/functional/test_client_redirects.py glance/tests/functional/test_cors_middleware.py glance/tests/functional/test_glance_manage.py glance/tests/functional/test_gzip_middleware.py glance/tests/functional/test_healthcheck_middleware.py glance/tests/functional/test_logging.py glance/tests/functional/test_reload.py glance/tests/functional/test_scrubber.py glance/tests/functional/test_sqlite.py glance/tests/functional/test_ssl.py glance/tests/functional/test_wsgi.py glance/tests/functional/db/__init__.py glance/tests/functional/db/base.py glance/tests/functional/db/base_glare.py glance/tests/functional/db/base_metadef.py glance/tests/functional/db/test_registry.py glance/tests/functional/db/test_rpc_endpoint.py glance/tests/functional/db/test_simple.py 
glance/tests/functional/db/test_sqlalchemy.py glance/tests/functional/glare/__init__.py glance/tests/functional/glare/test_glare.py glance/tests/functional/v1/__init__.py glance/tests/functional/v1/test_api.py glance/tests/functional/v1/test_copy_to_file.py glance/tests/functional/v1/test_misc.py glance/tests/functional/v1/test_multiprocessing.py glance/tests/functional/v2/__init__.py glance/tests/functional/v2/registry_data_api.py glance/tests/functional/v2/test_images.py glance/tests/functional/v2/test_metadef_namespaces.py glance/tests/functional/v2/test_metadef_objects.py glance/tests/functional/v2/test_metadef_properties.py glance/tests/functional/v2/test_metadef_resourcetypes.py glance/tests/functional/v2/test_metadef_tags.py glance/tests/functional/v2/test_schemas.py glance/tests/functional/v2/test_tasks.py glance/tests/integration/__init__.py glance/tests/integration/legacy_functional/__init__.py glance/tests/integration/legacy_functional/base.py glance/tests/integration/legacy_functional/test_v1_api.py glance/tests/integration/v2/__init__.py glance/tests/integration/v2/base.py glance/tests/integration/v2/test_property_quota_violations.py glance/tests/integration/v2/test_tasks_api.py glance/tests/unit/__init__.py glance/tests/unit/base.py glance/tests/unit/fake_rados.py glance/tests/unit/test_auth.py glance/tests/unit/test_cache_middleware.py glance/tests/unit/test_cached_images.py glance/tests/unit/test_context.py glance/tests/unit/test_context_middleware.py glance/tests/unit/test_db.py glance/tests/unit/test_db_metadef.py glance/tests/unit/test_domain.py glance/tests/unit/test_domain_proxy.py glance/tests/unit/test_glance_manage.py glance/tests/unit/test_glance_replicator.py glance/tests/unit/test_glare_plugin_loader.py glance/tests/unit/test_glare_type_definition_framework.py glance/tests/unit/test_image_cache.py glance/tests/unit/test_image_cache_client.py glance/tests/unit/test_jsonpatchmixin.py glance/tests/unit/test_manage.py 
glance/tests/unit/test_migrations.py glance/tests/unit/test_misc.py glance/tests/unit/test_notifier.py glance/tests/unit/test_policy.py glance/tests/unit/test_quota.py glance/tests/unit/test_schema.py glance/tests/unit/test_scrubber.py glance/tests/unit/test_store_glare.py glance/tests/unit/test_store_image.py glance/tests/unit/test_store_location.py glance/tests/unit/test_versions.py glance/tests/unit/utils.py glance/tests/unit/api/__init__.py glance/tests/unit/api/test_cmd.py glance/tests/unit/api/test_cmd_cache_manage.py glance/tests/unit/api/test_common.py glance/tests/unit/api/test_property_protections.py glance/tests/unit/api/middleware/__init__.py glance/tests/unit/api/middleware/test_cache_manage.py glance/tests/unit/async/__init__.py glance/tests/unit/async/test_async.py glance/tests/unit/async/test_taskflow_executor.py glance/tests/unit/async/flows/__init__.py glance/tests/unit/async/flows/test_convert.py glance/tests/unit/async/flows/test_import.py glance/tests/unit/async/flows/test_introspect.py glance/tests/unit/async/flows/test_ovf_process.py glance/tests/unit/common/__init__.py glance/tests/unit/common/test_client.py glance/tests/unit/common/test_config.py glance/tests/unit/common/test_exception.py glance/tests/unit/common/test_location_strategy.py glance/tests/unit/common/test_property_utils.py glance/tests/unit/common/test_rpc.py glance/tests/unit/common/test_scripts.py glance/tests/unit/common/test_semver.py glance/tests/unit/common/test_signature_utils.py glance/tests/unit/common/test_swift_store_utils.py glance/tests/unit/common/test_timeutils.py glance/tests/unit/common/test_utils.py glance/tests/unit/common/test_wsgi.py glance/tests/unit/common/test_wsgi_ipv6.py glance/tests/unit/common/scripts/__init__.py glance/tests/unit/common/scripts/test_scripts_utils.py glance/tests/unit/common/scripts/image_import/__init__.py glance/tests/unit/common/scripts/image_import/test_main.py glance/tests/unit/v1/__init__.py glance/tests/unit/v1/test_api.py 
glance/tests/unit/v1/test_registry_api.py glance/tests/unit/v1/test_registry_client.py glance/tests/unit/v1/test_upload_utils.py glance/tests/unit/v2/__init__.py glance/tests/unit/v2/test_image_actions_resource.py glance/tests/unit/v2/test_image_data_resource.py glance/tests/unit/v2/test_image_members_resource.py glance/tests/unit/v2/test_image_tags_resource.py glance/tests/unit/v2/test_images_resource.py glance/tests/unit/v2/test_metadef_resources.py glance/tests/unit/v2/test_registry_api.py glance/tests/unit/v2/test_registry_client.py glance/tests/unit/v2/test_schemas_resource.py glance/tests/unit/v2/test_tasks_resource.py glance/tests/var/ca.crt glance/tests/var/ca.key glance/tests/var/certificate.crt glance/tests/var/privatekey.key glance/tests/var/testserver-bad-ovf.ova glance/tests/var/testserver-no-disk.ova glance/tests/var/testserver-no-ovf.ova glance/tests/var/testserver-not-tar.ova glance/tests/var/testserver.ova rally-jobs/README.rst rally-jobs/glance.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst rally-jobs/plugins/plugin_sample.py releasenotes/notes/.placeholder releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/colorizer.py tools/install_venv.py tools/install_venv_common.py tools/migrate_image_owners.py 
tools/with_venv.shglance-12.0.0/glance.egg-info/PKG-INFO0000664000567000056710000000212712701407203020202 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: glance Version: 12.0.0 Summary: OpenStack Image Service Home-page: http://docs.openstack.org/developer/glance/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: ====== Glance ====== Glance is a project that defines services for discovering, registering, retrieving and storing virtual machine images. Use the following resources to learn more: * `Official Glance documentation `_ * `Official Client documentation `_ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 glance-12.0.0/glance.egg-info/top_level.txt0000664000567000056710000000000712701407203021632 0ustar jenkinsjenkins00000000000000glance glance-12.0.0/glance.egg-info/pbr.json0000664000567000056710000000005612701407203020562 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "02ef2ab"}glance-12.0.0/glance.egg-info/entry_points.txt0000664000567000056710000000305012701407203022377 0ustar jenkinsjenkins00000000000000[console_scripts] glance-api = glance.cmd.api:main glance-cache-cleaner = glance.cmd.cache_cleaner:main glance-cache-manage = glance.cmd.cache_manage:main glance-cache-prefetcher = glance.cmd.cache_prefetcher:main glance-cache-pruner = glance.cmd.cache_pruner:main glance-control = glance.cmd.control:main glance-glare = glance.cmd.glare:main glance-manage = glance.cmd.manage:main glance-registry = glance.cmd.registry:main glance-replicator = glance.cmd.replicator:main glance-scrubber = glance.cmd.scrubber:main 
[glance.artifacts.types] MyArtifact = glance.contrib.plugins.artifacts_sample:MY_ARTIFACT [glance.common.image_location_strategy.modules] location_order_strategy = glance.common.location_strategy.location_order store_type_strategy = glance.common.location_strategy.store_type [glance.database.metadata_backend] sqlalchemy = glance.db.sqlalchemy.metadata [glance.database.migration_backend] sqlalchemy = oslo_db.sqlalchemy.migration [glance.flows] import = glance.async.flows.base_import:get_flow [glance.flows.import] convert = glance.async.flows.convert:get_flow introspect = glance.async.flows.introspect:get_flow ovf_process = glance.async.flows.ovf_process:get_flow [oslo.config.opts] glance.api = glance.opts:list_api_opts glance.cache = glance.opts:list_cache_opts glance.glare = glance.opts:list_artifacts_opts glance.manage = glance.opts:list_manage_opts glance.registry = glance.opts:list_registry_opts glance.scrubber = glance.opts:list_scrubber_opts [oslo.config.opts.defaults] glance.api = glance.common.config:set_cors_middleware_defaults glance-12.0.0/tox.ini0000664000567000056710000000663412701407047015512 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.6 envlist = py34,py27,pep8 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} usedevelop = True install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} deps = -r{toxinidir}/test-requirements.txt commands = lockutils-wrapper python setup.py testr --slowest --testr-args='{posargs}' whitelist_externals = bash passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY [testenv:releasenotes] # NOTE(jaegerandi): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. 
install_command = pip install -U --force-reinstall {opts} {packages} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:debug] commands = oslo_debug_helper {posargs} [testenv:debug-py27] basepython = python2.7 commands = oslo_debug_helper {posargs} [testenv:debug-py34] basepython = python3.4 commands = oslo_debug_helper {posargs} [testenv:py34] commands = lockutils-wrapper python setup.py testr --slowest --testr-args='glance.tests.unit' python -m testtools.run \ glance.tests.functional.db.test_simple \ glance.tests.functional.test_client_exceptions \ glance.tests.functional.v1.test_multiprocessing [testenv:pep8] commands = flake8 {posargs} # Run security linter bandit -c bandit.yaml -r glance -n5 -p gate # Check that .po and .pot files are valid: bash -c "find glance -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:cover] # NOTE(jaegerandi): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. install_command = pip install -U --force-reinstall {opts} {packages} setenv = VIRTUAL_ENV={envdir} commands = python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$' [testenv:venv] # NOTE(jaegerandi): this target does not use constraints because # upstream infra does not yet support it. Once that's fixed, we can # drop the install_command. 
install_command = pip install -U --force-reinstall {opts} {packages} commands = {posargs} [testenv:genconfig] commands = oslo-config-generator --config-file etc/oslo-config-generator/glance-api.conf oslo-config-generator --config-file etc/oslo-config-generator/glance-registry.conf oslo-config-generator --config-file etc/oslo-config-generator/glance-scrubber.conf oslo-config-generator --config-file etc/oslo-config-generator/glance-cache.conf oslo-config-generator --config-file etc/oslo-config-generator/glance-manage.conf oslo-config-generator --config-file etc/oslo-config-generator/glance-glare.conf [testenv:docs] commands = python setup.py build_sphinx [testenv:bandit] commands = bandit -c bandit.yaml -r glance -n5 -p gate [flake8] # TODO(dmllr): Analyze or fix the warnings blacklisted below # E711 comparison to None should be 'if cond is not None:' # E712 comparison to True should be 'if cond is True:' or 'if cond:' # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line ignore = E711,E712,H404,H405 exclude = .venv,.git,.tox,dist,doc,etc,*glance/locale*,*lib/python*,*egg,build [hacking] local-check-factory = glance.hacking.checks.factory import_exceptions = glance.i18n glance-12.0.0/bandit.yaml0000664000567000056710000002553312701407047016323 0ustar jenkinsjenkins00000000000000# optional: after how many files to update progress #show_progress_every: 100 # optional: plugins directory name #plugins_dir: 'plugins' # optional: plugins discovery name pattern plugin_name_pattern: '*.py' # optional: terminal escape sequences to display colors #output_colors: # DEFAULT: '\033[0m' # HEADER: '\033[95m' # LOW: '\033[94m' # MEDIUM: '\033[93m' # HIGH: '\033[91m' # optional: log format string #log_format: "[%(module)s]\t%(levelname)s\t%(message)s" # globs of files which should be analyzed include: - '*.py' - '*.pyw' # a list of strings, which if found in the path will cause files to be excluded # for example 
/tests/ - to remove all all files in tests directory exclude_dirs: - '/tests/' profiles: gate: include: - any_other_function_with_shell_equals_true - assert_used - blacklist_calls - blacklist_import_func # One of the blacklisted imports is the subprocess module. Keystone # has to import the subprocess module in a single module for # eventlet support so in most cases bandit won't be able to detect # that subprocess is even being imported. Also, Bandit's # recommendation is just to check that the use is safe without any # documentation on what safe or unsafe usage is. So this test is # skipped. # - blacklist_imports - exec_used - execute_with_run_as_root_equals_true # - hardcoded_bind_all_interfaces # TODO: enable this test # Not working because wordlist/default-passwords file not bundled, # see https://bugs.launchpad.net/bandit/+bug/1451575 : # - hardcoded_password # Not used because it's prone to false positives: # - hardcoded_sql_expressions # - hardcoded_tmp_directory # TODO: enable this test - jinja2_autoescape_false - linux_commands_wildcard_injection - paramiko_calls - password_config_option_not_marked_secret - request_with_no_cert_validation - set_bad_file_permissions - subprocess_popen_with_shell_equals_true # - subprocess_without_shell_equals_true # TODO: enable this test - start_process_with_a_shell # - start_process_with_no_shell # TODO: enable this test - start_process_with_partial_path - ssl_with_bad_defaults - ssl_with_bad_version - ssl_with_no_version # - try_except_pass # TODO: enable this test - use_of_mako_templates blacklist_calls: bad_name_sets: # - pickle: # qualnames: [pickle.loads, pickle.load, pickle.Unpickler, # cPickle.loads, cPickle.load, cPickle.Unpickler] # message: "Pickle library appears to be in use, possible security issue." # TODO: enable this test - marshal: qualnames: [marshal.load, marshal.loads] message: "Deserialization with the marshal module is possibly dangerous." 
# - md5: # qualnames: [hashlib.md5, Crypto.Hash.MD2.new, Crypto.Hash.MD4.new, Crypto.Hash.MD5.new, cryptography.hazmat.primitives.hashes.MD5] # message: "Use of insecure MD2, MD4, or MD5 hash function." # TODO: enable this test - mktemp_q: qualnames: [tempfile.mktemp] message: "Use of insecure and deprecated function (mktemp)." - eval: qualnames: [eval] message: "Use of possibly insecure function - consider using safer ast.literal_eval." - mark_safe: names: [mark_safe] message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - httpsconnection: qualnames: [httplib.HTTPSConnection] message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - yaml_load: qualnames: [yaml.load] message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - urllib_urlopen: qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." - random: qualnames: [random.random, random.randrange, random.randint, random.choice, random.uniform, random.triangular] message: "Standard pseudo-random generators are not suitable for security/cryptographic purposes." level: "LOW" # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax # TODO(jaegerandi): Enable once defusedxml is in global requirements. #- xml_bad_cElementTree: # qualnames: [xml.etree.cElementTree.parse, # xml.etree.cElementTree.iterparse, # xml.etree.cElementTree.fromstring, # xml.etree.cElementTree.XMLParser] # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." 
#- xml_bad_ElementTree: # qualnames: [xml.etree.ElementTree.parse, # xml.etree.ElementTree.iterparse, # xml.etree.ElementTree.fromstring, # xml.etree.ElementTree.XMLParser] # message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_expatreader: qualnames: [xml.sax.expatreader.create_parser] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_expatbuilder: qualnames: [xml.dom.expatbuilder.parse, xml.dom.expatbuilder.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_sax: qualnames: [xml.sax.parse, xml.sax.parseString, xml.sax.make_parser] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_minidom: qualnames: [xml.dom.minidom.parse, xml.dom.minidom.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_pulldom: qualnames: [xml.dom.pulldom.parse, xml.dom.pulldom.parseString] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." - xml_bad_etree: qualnames: [lxml.etree.parse, lxml.etree.fromstring, lxml.etree.RestrictedElement, lxml.etree.GlobalParserTLS, lxml.etree.getDefaultParser, lxml.etree.check_docinfo] message: "Using {func} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {func} with it's defusedxml equivilent function." shell_injection: # Start a process using the subprocess module, or one of its wrappers. 
subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, utils.execute, utils.execute_with_timeout] # Start a process with a function vulnerable to shell injection. shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # Start a process with a function that is not vulnerable to shell injection. no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, os.startfile] blacklist_imports: bad_import_sets: - telnet: imports: [telnetlib] level: HIGH message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - info_libs: imports: [pickle, cPickle, subprocess, Crypto] level: LOW message: "Consider possible security implications associated with {module} module." # Most of this is based off of Christian Heimes' work on defusedxml: # https://pypi.python.org/pypi/defusedxml/#defusedxml-sax - xml_libs: imports: [xml.etree.cElementTree, xml.etree.ElementTree, xml.sax.expatreader, xml.sax, xml.dom.expatbuilder, xml.dom.minidom, xml.dom.pulldom, lxml.etree, lxml] message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Replace {module} with the equivilent defusedxml package." level: LOW - xml_libs_high: imports: [xmlrpclib] message: "Using {module} to parse untrusted XML data is known to be vulnerable to XML attacks. Use defused.xmlrpc.monkey_patch() function to monkey-patch xmlrpclib and mitigate XML vulnerabilities." 
level: HIGH hardcoded_tmp_directory: tmp_dirs: ['/tmp', '/var/tmp', '/dev/shm'] hardcoded_password: # Support for full path, relative path and special "%(site_data_dir)s" # substitution (/usr/{local}/share) word_list: "%(site_data_dir)s/wordlist/default-passwords" ssl_with_bad_version: bad_protocol_versions: - 'PROTOCOL_SSLv2' - 'SSLv2_METHOD' - 'SSLv23_METHOD' - 'PROTOCOL_SSLv3' # strict option - 'PROTOCOL_TLSv1' # strict option - 'SSLv3_METHOD' # strict option - 'TLSv1_METHOD' # strict option password_config_option_not_marked_secret: function_names: - oslo.config.cfg.StrOpt - oslo_config.cfg.StrOpt execute_with_run_as_root_equals_true: function_names: - ceilometer.utils.execute - cinder.utils.execute - neutron.agent.linux.utils.execute - nova.utils.execute - nova.utils.trycmd try_except_pass: check_typed_exception: True glance-12.0.0/pylintrc0000664000567000056710000000145512701407047015762 0ustar jenkinsjenkins00000000000000[Messages Control] # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. disable-msg=W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=[a-z_][a-z0-9_]{2,50}$ # Module names matching nova-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ # Don't require docstrings on tests. 
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 glance-12.0.0/.testr.conf0000664000567000056710000000051612701407047016256 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ ${PYTHON:-python} -m subunit.run discover -t ./ ./glance/tests $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list glance-12.0.0/AUTHORS0000664000567000056710000004106012701407203015231 0ustar jenkinsjenkins00000000000000Aaron Rosen Abhijeet Malawade Abhishek Chanda Abhishek Kekane Adam Gandelman Adam Gandelman Ajaya Agrawal Ala Rezmerita Alberto Planas Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Gaynor Alex Meade Alexander Gordeev Alexander Maretskiy Alexander Tivelkov Alexei Kornienko Alexey Galkin Amala Basha AmalaBasha AmalaBasha Anastasia Vlaskina Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Andrew Tranquada Andrey Brindeyev Andy McCrae Anita Kuno Arnaud Legendre Artur Svechnikov Ashish Jain Ashwini Shukla Aswad Rangnekar Attila Fazekas Auktavian Garrett Avinash Prasad Balazs Gibizer Bartosz Fic Ben Nemec Ben Roble Bernhard M. Wiedemann Bertrand Lallau Bhuvan Arumugam Bo Wang Boris Pavlovic Brant Knudson Brian Cline Brian D. Elliott Brian Elliott Brian Elliott Brian Lamar Brian Rosmaita Brian Waldon Brianna Poulos Béla Vancsics Cerberus Chang Bo Guo ChangBo Guo(gcb) Chmouel Boudjnah Chris Allnutt Chris Behrens Chris Buccella Chris Buccella Chris Fattarsi Chris St. 
Pierre Christian Berendt Christopher MacGown Chuck Short Cindy Pallares Clark Boylan Cory Benfield Cory Wright Cyril Roelandt Dan Prince Dane Fichter Daniel Krook Danny Al-Gaaf Darja Shakhray Davanum Srinivas Davanum Srinivas Dave Chen Dave McNally Dave Walker (Daviey) David Koo David Peraza David Ripton David Sariel Dean Troyer Deepti Ramakrishna DennyZhang Derek Higgins Desmond Sponsor Dina Belova Dinesh Bhor Dirk Mueller Dmitry Kulishenko Dolph Mathews Donal Lafferty Doron Chen Doug Hellmann Doug Hellmann Drew Varner Drew Varner Duncan McGreggor Eddie Sheffield Edgar Magana Edward Hope-Morley Eldar Nugaev Elena Ezhova Eoghan Glynn Eric Brown Eric Windisch Erno Kuvaja Erno Kuvaja Eugeniya Kudryashova Ewan Mellor Fabio M. Di Nitto Fei Long Wang Fei Long Wang Fengqian Gao Flaper Fesp Flavio Percoco Florent Flament Gabriel Hurley Gauvain Pocentek Geetika Batra George Peristerakis Georgy Okrokvertskhov Gerardo Porras Gorka Eguileor Grant Murphy Gregory Haynes Haiwei Xu Harsh Shah Harshada Mangesh Kakad He Yongli Hemanth Makkapati Hemanth Makkapati Hengqing Hu Henrique Truta Hirofumi Ichihara Hui Xiang Ian Cordasco Ian Cordasco Iccha Sethi Igor A. Lukyanenkov Ihar Hrachyshka Ildiko Vancsa Ilya Pekelny Inessa Vasilevskaya Ionuț Arțăriși Isaku Yamahata J. Daniel Schmidt Jakub Ruzicka James Carey James E. Blair James Li James Morgan James Polley Jamie Lennox Jared Culp Jasakov Artem Jason Koelker Jason Kölker Javeme Jay Pipes Jeremy Stanley Jesse Andrews Jesse J. Cook Jia Dong Jin Long Wang Jinwoo 'Joseph' Suh Joe Gordon Joe Gordon Johannes Erdfelt John Bresnahan John Lenihan John Warren Jon Bernard Jorge Niedbalski Joseph Suh Josh Durgin Josh Durgin Josh Kearney Joshua Harlow JuPing Juan Manuel Olle Juerg Haefliger Julia Varlamova Julien Danjou Jun Hong Li Justin Santa Barbara Justin Shepherd KIYOHIRO ADACHI Kamil Rykowski Karol Stepniewski Kasey Alusi Ken Pepple Ken Thomas Kent Wang Kentaro Takeda Keshava Bharadwaj Kevin L. 
Mitchell Kirill Zaitsev Kui Shi Kun Huang Lakshmi N Sampath Lars Gellrich Leam Leandro I. Costantino Lianhao Lu Lin Yang Liu Yuan Long Quan Sha Lorin Hochstein Louis Taylor Louis Taylor Luis A. Garcia Lyubov Kolesnikova Major Hayden Mark J. Washenberger Mark J. Washenberger Mark McLoughlin Mark Washenberger Martin Kletzander Martin Mágr Martin Tsvetanov Maru Newby Masashi Ozawa Matt Dietz Matt Fischer Matt Riedemann Matthew Booth Matthew Edmonds Matthew Treinish Matthias Schmitz Maurice Leeflang Mauro S. M. Rodrigues Michael J Fork Michael Krotscheck Michael Still Michal Dulko Mike Fedosin Mike Lundy Mingda Sun Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Monty Taylor Munoz, Obed N NAO NISHIJIMA Nassim Babaci Niall Bunting Niall Bunting NiallBunting Nicholas Kuechler Nicolas Simonds Nikhil Komawar Nikhil Komawar Nikolaj Starodubtsev Noboru Arai Noboru arai Oleksii Chuprykov Olena Logvinova Ondřej Nový Pamela-Rose Virtucio Patrick Mezard Paul Bourke Paul Bourke Paul McMillan Pavan Kumar Sunkara Pawel Koniszewski Pawel Skowron Peng Yong Pete Zaitcev Pranali Deore PranaliDeore Preetika Pádraig Brady Pádraig Brady Qiaowei Ren Radu Rainya Mosher Rajesh Tailor Ray Chen Reynolds Chin Rick Clark Rick Harris Robert Collins Rohan Kanade Roman Bogorodskiy Roman Bogorodskiy Roman Vasilets Ronald Bradford Rongze Zhu RongzeZhu Rui Zang Russell Bryant Russell Sim Sabari Kumar Murugesan Sachi King Sam Morrison Sam Stavinoha Samuel Merritt Sascha Peilicke Sascha Peilicke Sathish Nagappan Sean Dague Sean Dague Sergey Nikitin Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shuquan Huang Soren Hansen Stan Lagun Steve Kowalik Steve Lewis Stuart McLaren Sulochan Acharya Svetlana Shturm Takeaki Matsumoto Taku Fukushima Tatyana Leontovich Therese McHale Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Leaman Tim Daly, Jr Toan Nguyen Tom Cocozzello Tom Hancock Tom Leaman Tomas Hancock Tomoki Sekiyama Travis Tripp Travis Tripp Unmesh Gurjar Unmesh Gurjar Vaibhav 
Bhatkar Venkatesh Sampath Venkatesh Sampath Victor Morales Victor Sergeyev Victor Stinner Vincent Untz Vishvananda Ishaya Vitaliy Kolosov Vyacheslav Vakhlyuev Waldemar Znoinski Wayne A. Walls Wayne Okuma Wen Cheng Ma Wu Wenxiang Xi Yang XiaBing Yao YAMAMOTO Takashi Yaguang Tang Yanis Guenane Yufang Zhang Yuiko Takada Yuriy Taraday Yusuke Ide ZHANG Hua Zhenguo Niu Zhenguo Niu Zhi Yan Liu ZhiQiang Fan ZhiQiang Fan Zhiteng Huang Zhongyue Luo abhishek-kekane abhishekkekane amalaba ankitagrawal ankur annegentle april bria4010 daisy-ycguo dangming eddie-sheffield eos2102 gengjh henriquetruta houming-wang huangtianhua hzrandd <82433422@qq.com> iccha iccha-sethi iccha.sethi isethi itisha jakedahn jare6412 jaypipes@gmail.com <> jinxingfang jola-mirecka kairat_kushaev lawrancejing leo.young leseb ling-yun liuqing liyingjun liyingjun lizheming llg8212 ls1175 makocchi marianitadn mathrock nanhai liao pran1990 ravikumar-venkatesan rsritesh sai krishna sripada sarvesh-ranjan shreeduth-awasthi shrutiranade38 shu,xinxin sridevik sridevik tanlin ting.wang tmcpeak tobe venkatamahesh wanghong wangxiyuan yangxurong yongiman zhu.rong zwei glance-12.0.0/requirements.txt0000664000567000056710000000342712701407051017453 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr>=1.6 # Apache-2.0 # < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983 SQLAlchemy<1.1.0,>=1.0.10 # MIT eventlet!=0.18.3,>=0.18.2 # MIT PasteDeploy>=1.5.0 # MIT Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT WebOb>=1.2.3 # MIT sqlalchemy-migrate>=0.9.6 # Apache-2.0 httplib2>=0.7.5 # MIT pycrypto>=2.6 # Public Domain oslo.config>=3.7.0 # Apache-2.0 oslo.concurrency>=3.5.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.service>=1.0.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 stevedore>=1.5.0 # Apache-2.0 futurist>=0.11.0 # Apache-2.0 taskflow>=1.26.0 # Apache-2.0 keystoneauth1>=2.1.0 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 WSME>=0.8 # MIT PrettyTable<0.8,>=0.7 # BSD # For paste.util.template used in keystone.common.template Paste # MIT jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 pyOpenSSL>=0.14 # Apache-2.0 # Required by openstack.common libraries six>=1.9.0 # MIT oslo.db>=4.1.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.messaging>=4.0.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 osprofiler>=1.1.0 # Apache-2.0 # Glance Store glance-store>=0.13.0 # Apache-2.0 # Artifact repository semantic-version>=2.3.1 # BSD castellan>=0.3.1 # Apache-2.0 cryptography>=1.0 # BSD/Apache-2.0 debtcollector>=1.2.0 # Apache-2.0 # timeutils iso8601>=0.1.9 # MIT monotonic>=0.6 # Apache-2.0 glance-12.0.0/HACKING.rst0000664000567000056710000000244012701407047015764 0ustar jenkinsjenkins00000000000000glance Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on glance Specific Commandments -------------------------- - [G316] Change assertTrue(isinstance(A, B)) by optimal assert like 
assertIsInstance(A, B) - [G317] Change assertEqual(type(A), B) by optimal assert like assertIsInstance(A, B) - [G318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like assertIsNone(A) - [G319] Validate that debug level logs are not translated - [G320] For python 3 compatibility, use six.text_type() instead of unicode() - [G321] Validate that LOG messages, except debug ones, have translations - [G322] Validate that LOG.info messages use _LI. - [G323] Validate that LOG.exception messages use _LE. - [G324] Validate that LOG.error messages use _LE. - [G325] Validate that LOG.critical messages use _LC. - [G326] Validate that LOG.warning messages use _LW. - [G327] Prevent use of deprecated contextlib.nested - [G328] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs - [G329] Python 3: Do not use xrange. - [G330] Python 3: do not use dict.iteritems. - [G331] Python 3: do not use dict.iterkeys. - [G332] Python 3: do not use dict.itervalues. glance-12.0.0/CONTRIBUTING.rst0000664000567000056710000000106212701407047016626 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/glance glance-12.0.0/releasenotes/0000775000567000056710000000000012701407204016652 5ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/notes/0000775000567000056710000000000012701407204020002 5ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/notes/Prevent-removing-last-image-location-d5ee3e00efe14f34.yaml0000664000567000056710000000062712701407047032350 0ustar jenkinsjenkins00000000000000--- security: - Fixing bug 1525915; image might be transitioning from active to queued by regular user by removing last location of image (or replacing locations with empty list). This allows user to re-upload data to the image breaking Glance's promise of image data immutability. From now on, last location cannot be removed and locations cannot be replaced with empty list. glance-12.0.0/releasenotes/notes/make-task-api-admin-only-by-default-7def996262e18f7a.yaml0000664000567000056710000000133212701407047031656 0ustar jenkinsjenkins00000000000000--- deprecations: - The task API was added to allow users for uploading images asynchronously and for deployers to have more control in the upload process. Unfortunately, this API has not worked the way it was expected to. Therefore, the task API has entered a deprecation period and it is meant to be replaced by the new import API. This change makes the task API admin only by default so that it is not accidentally deployed as a public API. upgrade: - The task API is being deprecated and it has been made admin only. 
If deployers of Glance would like to have this API as a public one, it is necessary to change the `policy.json` file and remove `role:admin` from every `task` related field.glance-12.0.0/releasenotes/notes/.placeholder0000664000567000056710000000000012701407047022260 0ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/notes/trust-support-registry-cfd17a6a9ab21d70.yaml0000664000567000056710000000060212701407047027776 0ustar jenkinsjenkins00000000000000--- features: - Implemented re-authentication with trusts when updating image status in registry after image upload. When long-running image upload takes some a lot of time (more than token expiration time) glance uses trusts to receive new token and update image status in registry. It allows users to upload big size images without increasing token expiration time. glance-12.0.0/releasenotes/notes/new_image_filters-c888361e6ecf495c.yaml0000664000567000056710000000144512701407047026622 0ustar jenkinsjenkins00000000000000--- features: - Implement the ability to filter images by the properties `id`, `name`, `status`,`container_format`, `disk_format` using the 'in' operator between the values. Following the pattern of existing filters, new filters are specified as query parameters using the field to filter as the key and the filter criteria as the value in the parameter. Filtering based on the principle of full compliance with the template, for example 'name = in:deb' does not match 'debian'. Changes apply exclusively to the API v2 Image entity listings An example of an acceptance criteria using the 'in' operator for name ?name=in:name1,name2,name3. These filters were added using syntax that conforms to the latest guidelines from the OpenStack API Working Group. 
glance-12.0.0/releasenotes/notes/deprecate-glance-api-opts-23bdbd1ad7625999.yaml0000664000567000056710000000047512701407047030041 0ustar jenkinsjenkins00000000000000--- deprecations: - The use_user_token, admin_user, admin_password, admin_tenant_name, auth_url, auth_strategy and auth_region options in the [DEFAULT] configuration section in glance-api.conf are deprecated, and will be removed in the O release. See https://wiki.openstack.org/wiki/OSSN/OSSN-0060 glance-12.0.0/releasenotes/notes/bug-1537903-54b2822eac6cfc09.yaml0000664000567000056710000000100312701407047024565 0ustar jenkinsjenkins00000000000000--- upgrade: - Metadata definitions previously associated with OS::Nova::Instance have been changed to be associated with OS::Nova::Server in order to align with Heat and Searchlight. You may either upgrade them using glance-manage db load_metadefs [path] [merge] [prefer_new] or glance-manage db upgrade 44. fixes: - Metadata definitions previously associated with OS::Nova::Instance have been changed to be associated with OS::Nova::Server in order to align with Heat and Searchlight. glance-12.0.0/releasenotes/notes/remove-osprofiler-paste-ini-options-c620dedc8f9728ff.yaml0000664000567000056710000000130712701407047032327 0ustar jenkinsjenkins00000000000000--- deprecations: - OSprofiler support requires passing of trace information between various OpenStack services. This information is signed by one of HMAC keys, which we historically defined in glance-api-paste.ini and glance-registry-paste.ini files (together with enabled option, that in fact was duplicated in the corresponding configuration files). OSprofiler 0.3.1 and higher supports passing this information via configuration files, therefore it's recommended to modify the ``[filter:osprofiler]`` section in \*-paste.ini to look like ``paste.filter_factor = osprofiler.web:WsgiMiddleware.factory`` and set the ``hmac_keys`` option in the glance-\*.conf files. 
glance-12.0.0/releasenotes/source/0000775000567000056710000000000012701407204020152 5ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/source/index.rst0000664000567000056710000000017412701407051022015 0ustar jenkinsjenkins00000000000000====================== Glance Release Notes ====================== .. toctree:: :maxdepth: 1 liberty unreleased glance-12.0.0/releasenotes/source/_templates/0000775000567000056710000000000012701407204022307 5ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/source/_templates/.placeholder0000664000567000056710000000000012701407047024565 0ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/source/unreleased.rst0000664000567000056710000000016012701407047023035 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: glance-12.0.0/releasenotes/source/liberty.rst0000664000567000056710000000022212701407047022357 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty glance-12.0.0/releasenotes/source/conf.py0000664000567000056710000002156012701407047021462 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Glance Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. 
# # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Glance Release Notes' copyright = u'2015, Glance Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from glance.version import version_info as glance_version # The full version, including alpha/beta/rc tags. release = glance_version.version_string_with_vcs() # The short X.Y version. version = glance_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
# html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'GlanceReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation', u'Glance Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'glancereleasenotes', u'Glance Release Notes Documentation', [u'Glance Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation', u'Glance Developers', 'GlanceReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False glance-12.0.0/releasenotes/source/_static/0000775000567000056710000000000012701407204021600 5ustar jenkinsjenkins00000000000000glance-12.0.0/releasenotes/source/_static/.placeholder0000664000567000056710000000000012701407047024056 0ustar jenkinsjenkins00000000000000glance-12.0.0/README.rst0000664000567000056710000000055112701407047015656 0ustar jenkinsjenkins00000000000000====== Glance ====== Glance is a project that defines services for discovering, registering, retrieving and storing virtual machine images. Use the following resources to learn more: * `Official Glance documentation `_ * `Official Client documentation `_ glance-12.0.0/.mailmap0000664000567000056710000000226712701407047015616 0ustar jenkinsjenkins00000000000000# Format is: # # Zhongyue Luo Zhenguo Niu David Koo glance-12.0.0/setup.py0000664000567000056710000000200412701407047015674 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) glance-12.0.0/test-requirements.txt0000664000567000056710000000206312701407047020430 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Hacking already pins down pep8, pyflakes and flake8 hacking<0.11,>=0.10.0 # For translations processing Babel>=1.3 # BSD # Needed for testing bandit>=0.17.3 # Apache-2.0 coverage>=3.6 # Apache-2.0 fixtures>=1.3.1 # Apache-2.0/BSD mox3>=0.7.0 # Apache-2.0 mock>=1.2 # BSD sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD requests!=2.9.0,>=2.8.1 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT psutil<2.0.0,>=1.1.1 # BSD oslotest>=1.10.0 # Apache-2.0 # Optional packages that should be installed when testing PyMySQL>=0.6.2 # MIT License psycopg2>=2.5 # LGPL/ZPL pysendfile>=2.0.0 # MIT qpid-python;python_version=='2.7' # Apache-2.0 xattr>=0.4 # MIT python-swiftclient>=2.2.0 # Apache-2.0 # Documentation oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2